author    Ingo Molnar <mingo@elte.hu>    2009-11-04 11:54:15 +0100
committer Ingo Molnar <mingo@elte.hu>    2009-11-04 11:59:45 +0100
commit    a2e71271535fde493c32803b1f34789f97efcb5e (patch)
tree      90d7139bea2f49e947f27af92614fa6eca50b64d /drivers
parent    6d7aa9d721c8c640066142fd9534afcdf68d7f9d (diff)
parent    b419148e567728f6af0c3b01965c1cc141e3e13a (diff)
Merge commit 'v2.6.32-rc6' into perf/core
Conflicts:
tools/perf/Makefile
Merge reason: Resolve the conflict, merge to upstream and merge in
perf fixes so we can add a dependent patch.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'drivers')
299 files changed, 13394 insertions(+), 5071 deletions(-)
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 0ed42d8870c..93d2c7971df 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -218,10 +218,10 @@ config ACPI_PROCESSOR_AGGREGATOR depends on X86 help ACPI 4.0 defines processor Aggregator, which enables OS to perform - specfic processor configuration and control that applies to all + specific processor configuration and control that applies to all processors in the platform. Currently only logical processor idling is defined, which is to reduce power consumption. This driver - support the new device. + supports the new device. config ACPI_THERMAL tristate "Thermal Zone" diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index 98b9690b015..b6ed60b57b0 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c @@ -245,6 +245,7 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event) acpi_bus_generate_netlink_event(device->pnp.device_class, dev_name(&device->dev), event, (u32) ac->state); + acpi_notifier_call_chain(device, event, (u32) ac->state); #ifdef CONFIG_ACPI_SYSFS_POWER kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE); #endif diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index 9335b87c517..0c9c6a9a002 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c @@ -251,6 +251,9 @@ int acpi_lid_open(void) acpi_status status; unsigned long long state; + if (!lid_device) + return -ENODEV; + status = acpi_evaluate_integer(lid_device->handle, "_LID", NULL, &state); if (ACPI_FAILURE(status)) diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 31122214e0e..1af808171d4 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -389,6 +389,17 @@ struct pci_dev *acpi_get_pci_dev(acpi_handle handle) pbus = pdev->subordinate; pci_dev_put(pdev); + + /* + * This function may be called for a non-PCI device that has a + * PCI parent (eg. a disk under a PCI SATA controller). In that + * case pdev->subordinate will be NULL for the parent. + */ + if (!pbus) { + dev_dbg(&pdev->dev, "Not a PCI-to-PCI bridge\n"); + pdev = NULL; + break; + } } out: list_for_each_entry_safe(node, tmp, &device_list, node) diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index f6e54bf8dd9..64e3c581b7a 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -1109,7 +1109,12 @@ static int acpi_video_bus_check(struct acpi_video_bus *video) */ /* Does this device support video switching? */ - if (video->cap._DOS) { + if (video->cap._DOS || video->cap._DOD) { + if (!video->cap._DOS) { + printk(KERN_WARNING FW_BUG + "ACPI(%s) defines _DOD but not _DOS\n", + acpi_device_bid(video->device)); + } video->flags.multihead = 1; status = 0; } diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 7032f25da9b..575593a8b4e 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -84,7 +84,7 @@ long acpi_is_video_device(struct acpi_device *device) return 0; /* Does this device able to support video switching ? 
*/ - if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy)) && + if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy)) || ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy))) video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING; diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index b1a257746a1..a06f5d6375a 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -575,7 +575,7 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */ /* AMD */ - { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD SB900 */ + { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */ /* AMD is using RAID class only for ahci controllers */ { PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci }, @@ -605,6 +605,7 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_yesncq }, /* MCP67 */ { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq }, /* MCP67 */ { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq }, /* MCP67 */ + { PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_yesncq }, /* Linux ID */ { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq }, /* MCP73 */ { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq }, /* MCP73 */ { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq }, /* MCP73 */ diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index b525a098134..d7f0f1b1ae3 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -5028,12 +5028,14 @@ void ata_qc_complete(struct ata_queued_cmd *qc) qc->flags |= ATA_QCFLAG_FAILED; if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { - if (!ata_tag_internal(qc->tag)) { - /* always fill result TF for failed qc */ - fill_result_tf(qc); + /* always fill result TF for failed qc */ + fill_result_tf(qc); + + if (!ata_tag_internal(qc->tag)) ata_qc_schedule_eh(qc); - return; - } + else + __ata_qc_complete(qc); + return; } WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN); diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 0a97822da21..bba2ae5df1c 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -2981,12 +2981,14 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link, * device detection messages backwards. 
*/ ata_for_each_dev(dev, link, ALL) { - if (!(new_mask & (1 << dev->devno)) || - dev->class == ATA_DEV_PMP) + if (!(new_mask & (1 << dev->devno))) continue; dev->class = ehc->classes[dev->devno]; + if (dev->class == ATA_DEV_PMP) + continue; + ehc->i.flags |= ATA_EHI_PRINTINFO; rc = ata_dev_configure(dev); ehc->i.flags &= ~ATA_EHI_PRINTINFO; diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c index aa4b3f6ae77..ae4454d4e95 100644 --- a/drivers/ata/pata_atiixp.c +++ b/drivers/ata/pata_atiixp.c @@ -246,7 +246,7 @@ static const struct pci_device_id atiixp[] = { { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), }, { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), }, { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), }, - { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_SB900_IDE), }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_HUDSON2_IDE), }, { }, }; diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c index f49814d6fd2..3bbed8322ec 100644 --- a/drivers/ata/pata_sc1200.c +++ b/drivers/ata/pata_sc1200.c @@ -235,8 +235,7 @@ static int sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id) .udma_mask = ATA_UDMA2, .port_ops = &sc1200_port_ops }; - /* Can't enable port 2 yet, see top comments */ - const struct ata_port_info *ppi[] = { &info, }; + const struct ata_port_info *ppi[] = { &info, NULL }; return ata_pci_sff_init_one(dev, ppi, &sc1200_sht, NULL); } diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c index 45657cacec4..88984b803d6 100644 --- a/drivers/ata/pata_via.c +++ b/drivers/ata/pata_via.c @@ -111,7 +111,7 @@ static const struct via_isa_bridge { { "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_SATA_PATA }, { "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_NO_ENABLES }, - { "vt6415", PCI_DEVICE_ID_VIA_6415, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_NO_ENABLES }, + { "vt6415", PCI_DEVICE_ID_VIA_6415, 0x00, 0xff, VIA_UDMA_133 | VIA_BAD_AST | VIA_NO_ENABLES }, { "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, { "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, { "vt8235", PCI_DEVICE_ID_VIA_8235, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 17f9ff9067a..6f5093b7c8c 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -1382,6 +1382,25 @@ static int mv_qc_defer(struct ata_queued_cmd *qc) */ if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) return ATA_DEFER_PORT; + + /* PIO commands need exclusive link: no other commands [DMA or PIO] + * can run concurrently. + * set excl_link when we want to send a PIO command in DMA mode + * or a non-NCQ command in NCQ mode. + * When we receive a command from that link, and there are no + * outstanding commands, mark a flag to clear excl_link and let + * the command go through. + */ + if (unlikely(ap->excl_link)) { + if (link == ap->excl_link) { + if (ap->nr_active_links) + return ATA_DEFER_PORT; + qc->flags |= ATA_QCFLAG_CLEAR_EXCL; + return 0; + } else + return ATA_DEFER_PORT; + } + /* * If the port is completely idle, then allow the new qc. */ @@ -1395,8 +1414,14 @@ static int mv_qc_defer(struct ata_queued_cmd *qc) * doesn't allow it. 
*/ if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) && - (pp->pp_flags & MV_PP_FLAG_NCQ_EN) && ata_is_ncq(qc->tf.protocol)) - return 0; + (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) { + if (ata_is_ncq(qc->tf.protocol)) + return 0; + else { + ap->excl_link = link; + return ATA_DEFER_PORT; + } + } return ATA_DEFER_PORT; } diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index 86a40582999..1eb4e020eb5 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c @@ -1594,9 +1594,21 @@ static int nv_hardreset(struct ata_link *link, unsigned int *class, !ata_dev_enabled(link->device)) sata_link_hardreset(link, sata_deb_timing_hotplug, deadline, NULL, NULL); - else if (!(ehc->i.flags & ATA_EHI_QUIET)) - ata_link_printk(link, KERN_INFO, - "nv: skipping hardreset on occupied port\n"); + else { + const unsigned long *timing = sata_ehc_deb_timing(ehc); + int rc; + + if (!(ehc->i.flags & ATA_EHI_QUIET)) + ata_link_printk(link, KERN_INFO, "nv: skipping " + "hardreset on occupied port\n"); + + /* make sure the link is online */ + rc = sata_link_resume(link, timing, deadline); + /* whine about phy resume failure but proceed */ + if (rc && rc != -EOPNOTSUPP) + ata_link_printk(link, KERN_WARNING, "failed to resume " + "link (errno=%d)\n", rc); + } /* device signature acquisition is unreliable */ return -EAGAIN; diff --git a/drivers/base/bus.c b/drivers/base/bus.c index 973bf2ad4e0..63c143e54a5 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -689,15 +689,19 @@ int bus_add_driver(struct device_driver *drv) printk(KERN_ERR "%s: driver_add_attrs(%s) failed\n", __func__, drv->name); } - error = add_bind_files(drv); - if (error) { - /* Ditto */ - printk(KERN_ERR "%s: add_bind_files(%s) failed\n", - __func__, drv->name); + + if (!drv->suppress_bind_attrs) { + error = add_bind_files(drv); + if (error) { + /* Ditto */ + printk(KERN_ERR "%s: add_bind_files(%s) failed\n", + __func__, drv->name); + } } kobject_uevent(&priv->kobj, KOBJ_ADD); return 0; + out_unregister: kfree(drv->p); drv->p = NULL; @@ -720,7 +724,8 @@ void bus_remove_driver(struct device_driver *drv) if (!drv->bus) return; - remove_bind_files(drv); + if (!drv->suppress_bind_attrs) + remove_bind_files(drv); driver_remove_attrs(drv->bus, drv); driver_remove_file(drv, &driver_attr_uevent); klist_remove(&drv->p->knode_bus); diff --git a/drivers/base/driver.c b/drivers/base/driver.c index ed2ebd3c287..f367885a764 100644 --- a/drivers/base/driver.c +++ b/drivers/base/driver.c @@ -236,7 +236,7 @@ int driver_register(struct device_driver *drv) put_driver(other); printk(KERN_ERR "Error: Driver '%s' is already registered, " "aborting...\n", drv->name); - return -EEXIST; + return -EBUSY; } ret = bus_add_driver(drv); diff --git a/drivers/base/platform.c b/drivers/base/platform.c index ed156a13aa4..4fa954b07ac 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -521,11 +521,15 @@ int __init_or_module platform_driver_probe(struct platform_driver *drv, { int retval, code; + /* make sure driver won't have bind/unbind attributes */ + drv->driver.suppress_bind_attrs = true; + /* temporary section violation during probe() */ drv->probe = probe; retval = code = platform_driver_register(drv); - /* Fixup that section violation, being paranoid about code scanning + /* + * Fixup that section violation, being paranoid about code scanning * the list of drivers in order to probe new devices. Check to see * if the probe was successful, and make sure any forced probes of * new devices fail. 
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index e0dc4071e08..8aa2443182d 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -511,6 +511,7 @@ static void dpm_complete(pm_message_t state) INIT_LIST_HEAD(&list); mutex_lock(&dpm_list_mtx); + transition_started = false; while (!list_empty(&dpm_list)) { struct device *dev = to_device(dpm_list.prev); diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 38556f6cc22..a770498a74e 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -51,8 +51,6 @@ static int __pm_runtime_idle(struct device *dev) { int retval = 0; - dev_dbg(dev, "__pm_runtime_idle()!\n"); - if (dev->power.runtime_error) retval = -EINVAL; else if (dev->power.idle_notification) @@ -93,8 +91,6 @@ static int __pm_runtime_idle(struct device *dev) wake_up_all(&dev->power.wait_queue); out: - dev_dbg(dev, "__pm_runtime_idle() returns %d!\n", retval); - return retval; } diff --git a/drivers/block/loop.c b/drivers/block/loop.c index edda9ea7c62..bd112c8c7bc 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -949,7 +949,7 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev) lo->lo_state = Lo_unbound; /* This is safe: open() is still holding a reference. */ module_put(THIS_MODULE); - if (max_part > 0) + if (max_part > 0 && bdev) ioctl_by_bdev(bdev, BLKRRPART, 0); mutex_unlock(&lo->lo_ctl_mutex); /* diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 43f19389647..51042f0ba7e 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -3,7 +3,6 @@ #include <linux/blkdev.h> #include <linux/hdreg.h> #include <linux/virtio.h> -#include <linux/virtio_ids.h> #include <linux/virtio_blk.h> #include <linux/scatterlist.h> @@ -183,34 +182,6 @@ static void do_virtblk_request(struct request_queue *q) vblk->vq->vq_ops->kick(vblk->vq); } -/* return ATA identify data - */ -static int virtblk_identify(struct gendisk *disk, void *argp) -{ - struct virtio_blk *vblk = disk->private_data; - void *opaque; - int err = -ENOMEM; - - opaque = kmalloc(VIRTIO_BLK_ID_BYTES, GFP_KERNEL); - if (!opaque) - goto out; - - err = virtio_config_buf(vblk->vdev, VIRTIO_BLK_F_IDENTIFY, - offsetof(struct virtio_blk_config, identify), opaque, - VIRTIO_BLK_ID_BYTES); - - if (err) - goto out_kfree; - - if (copy_to_user(argp, opaque, VIRTIO_BLK_ID_BYTES)) - err = -EFAULT; - -out_kfree: - kfree(opaque); -out: - return err; -} - static void virtblk_prepare_flush(struct request_queue *q, struct request *req) { req->cmd_type = REQ_TYPE_LINUX_BLOCK; @@ -222,10 +193,6 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode, { struct gendisk *disk = bdev->bd_disk; struct virtio_blk *vblk = disk->private_data; - void __user *argp = (void __user *)data; - - if (cmd == HDIO_GET_IDENTITY) - return virtblk_identify(disk, argp); /* * Only allow the generic SCSI ioctls if the host can support it. 
@@ -233,7 +200,8 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode, if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI)) return -ENOTTY; - return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp); + return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, + (void __user *)data); } /* We provide getgeo only to please some old bootloader/partitioning tools */ @@ -332,7 +300,6 @@ static int __devinit virtblk_probe(struct virtio_device *vdev) } vblk->disk->queue->queuedata = vblk; - queue_flag_set_unlocked(QUEUE_FLAG_VIRT, vblk->disk->queue); if (index < 26) { sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26); @@ -445,7 +412,7 @@ static struct virtio_device_id id_table[] = { static unsigned int features[] = { VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, - VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_IDENTIFY, VIRTIO_BLK_F_FLUSH + VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_FLUSH }; /* diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 08a6f50ae79..6aad99ec4e0 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -323,7 +323,7 @@ config SPECIALIX config SX tristate "Specialix SX (and SI) card support" - depends on SERIAL_NONSTANDARD && (PCI || EISA || ISA) + depends on SERIAL_NONSTANDARD && (PCI || EISA || ISA) && BROKEN help This is a driver for the SX and SI multiport serial cards. Please read the file <file:Documentation/serial/sx.txt> for details. @@ -334,7 +334,7 @@ config SX config RIO tristate "Specialix RIO system support" - depends on SERIAL_NONSTANDARD + depends on SERIAL_NONSTANDARD && BROKEN help This is a driver for the Specialix RIO, a smart serial card which drives an outboard box that can support up to 128 ports. Product @@ -395,7 +395,7 @@ config NOZOMI config A2232 tristate "Commodore A2232 serial support (EXPERIMENTAL)" - depends on EXPERIMENTAL && ZORRO && BROKEN_ON_SMP + depends on EXPERIMENTAL && ZORRO && BROKEN ---help--- This option supports the 2232 7-port serial card shipped with the Amiga 2000 and other Zorro-bus machines, dating from 1989. At diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c index 962968f05b9..915157fcff9 100644 --- a/drivers/char/hw_random/virtio-rng.c +++ b/drivers/char/hw_random/virtio-rng.c @@ -21,7 +21,6 @@ #include <linux/scatterlist.h> #include <linux/spinlock.h> #include <linux/virtio.h> -#include <linux/virtio_ids.h> #include <linux/virtio_rng.h> /* The host will fill any buffer we give it with sweet, sweet randomness. 
We @@ -117,7 +116,7 @@ static int virtrng_probe(struct virtio_device *vdev) return 0; } -static void virtrng_remove(struct virtio_device *vdev) +static void __devexit virtrng_remove(struct virtio_device *vdev) { vdev->config->reset(vdev); hwrng_unregister(&virtio_hwrng); diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 0d328b59568..a035ae39a35 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -31,7 +31,6 @@ #include <linux/err.h> #include <linux/init.h> #include <linux/virtio.h> -#include <linux/virtio_ids.h> #include <linux/virtio_console.h> #include "hvc_console.h" diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c index 6b36ee56e6f..ed86d3bf249 100644 --- a/drivers/char/vt_ioctl.c +++ b/drivers/char/vt_ioctl.c @@ -1532,7 +1532,7 @@ long vt_compat_ioctl(struct tty_struct *tty, struct file * file, case PIO_UNIMAP: case GIO_UNIMAP: - ret = do_unimap_ioctl(cmd, up, perm, vc); + ret = compat_unimap_ioctl(cmd, up, perm, vc); break; /* diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index ad41f19b8e3..12fdd3987a3 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -76,8 +76,11 @@ static void cpuidle_idle_call(void) #endif /* ask the governor for the next state */ next_state = cpuidle_curr_governor->select(dev); - if (need_resched()) + if (need_resched()) { + local_irq_enable(); return; + } + target_state = &dev->states[next_state]; /* enter the state and update stats */ diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c index 76cb6b345e7..0af80577dc7 100644 --- a/drivers/crypto/padlock-sha.c +++ b/drivers/crypto/padlock-sha.c @@ -24,6 +24,12 @@ #include <asm/i387.h> #include "padlock.h" +#ifdef CONFIG_64BIT +#define STACK_ALIGN 16 +#else +#define STACK_ALIGN 4 +#endif + struct padlock_sha_desc { struct shash_desc fallback; }; @@ -64,7 +70,9 @@ static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in, /* We can't store directly to *out as it may be unaligned. */ /* BTW Don't reduce the buffer size below 128 Bytes! * PadLock microcode needs it that big. */ - char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT))); + char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__ + ((aligned(STACK_ALIGN))); + char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); struct padlock_sha_desc *dctx = shash_desc_ctx(desc); struct sha1_state state; unsigned int space; @@ -128,7 +136,9 @@ static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in, /* We can't store directly to *out as it may be unaligned. */ /* BTW Don't reduce the buffer size below 128 Bytes! * PadLock microcode needs it that big. 
*/ - char result[128] __attribute__ ((aligned(PADLOCK_ALIGNMENT))); + char buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __attribute__ + ((aligned(STACK_ALIGN))); + char *result = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); struct padlock_sha_desc *dctx = shash_desc_ctx(desc); struct sha256_state state; unsigned int space; diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 4f4ac82382f..d4560d9d5a8 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -1122,7 +1122,7 @@ static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram) debugf0("Reading K8_DRAM_BASE_LOW failed\n"); /* Extract parts into separate data entries */ - pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 24; + pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8; pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7; pvt->dram_rw_en[dram] = (low & 0x3); @@ -1135,7 +1135,7 @@ static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram) * Extract parts into separate data entries. Limit is the HIGHEST memory * location of the region, so lower 24 bits need to be all ones */ - pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 24) | 0x00FFFFFF; + pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF; pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7; pvt->dram_DstNode[dram] = (low & 0x7); } @@ -1369,7 +1369,7 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram) pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7; pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) | - (((u64)low_base & 0xFFFF0000) << 24); + (((u64)low_base & 0xFFFF0000) << 8); low_offset = K8_DRAM_LIMIT_LOW + (dram << 3); high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3); @@ -1391,7 +1391,7 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram) * memory location of the region, so low 24 bits need to be all ones. 
*/ pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) | - (((u64) low_limit & 0xFFFF0000) << 24) | + (((u64) low_limit & 0xFFFF0000) << 8) | 0x00FFFFFF; } diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c index d335086f4a2..77a9579d716 100644 --- a/drivers/edac/i5000_edac.c +++ b/drivers/edac/i5000_edac.c @@ -1173,7 +1173,7 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci) pci_read_config_word(pvt->branch_1, where, &pvt->b1_mtr[slot_row]); debugf2("MTR%d where=0x%x B1 value=0x%x\n", slot_row, - where, pvt->b0_mtr[slot_row]); + where, pvt->b1_mtr[slot_row]); } else { pvt->b1_mtr[slot_row] = 0; } @@ -1232,7 +1232,7 @@ static int i5000_init_csrows(struct mem_ctl_info *mci) struct csrow_info *p_csrow; int empty, channel_count; int max_csrows; - int mtr; + int mtr, mtr1; int csrow_megs; int channel; int csrow; @@ -1251,9 +1251,10 @@ static int i5000_init_csrows(struct mem_ctl_info *mci) /* use branch 0 for the basis */ mtr = pvt->b0_mtr[csrow >> 1]; + mtr1 = pvt->b1_mtr[csrow >> 1]; /* if no DIMMS on this row, continue */ - if (!MTR_DIMMS_PRESENT(mtr)) + if (!MTR_DIMMS_PRESENT(mtr) && !MTR_DIMMS_PRESENT(mtr1)) continue; /* FAKE OUT VALUES, FIXME */ diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c index b08b6d8e2dc..f99d10655ed 100644 --- a/drivers/edac/i5400_edac.c +++ b/drivers/edac/i5400_edac.c @@ -46,9 +46,10 @@ /* Limits for i5400 */ #define NUM_MTRS_PER_BRANCH 4 #define CHANNELS_PER_BRANCH 2 +#define MAX_DIMMS_PER_CHANNEL NUM_MTRS_PER_BRANCH #define MAX_CHANNELS 4 -#define MAX_DIMMS (MAX_CHANNELS * 4) /* Up to 4 DIMM's per channel */ -#define MAX_CSROWS (MAX_DIMMS * 2) /* max possible csrows per channel */ +/* max possible csrows per channel */ +#define MAX_CSROWS (MAX_DIMMS_PER_CHANNEL) /* Device 16, * Function 0: System Address @@ -331,7 +332,6 @@ static const struct i5400_dev_info i5400_devs[] = { struct i5400_dimm_info { int megabytes; /* size, 0 means not present */ - int dual_rank; }; /* driver private data structure */ @@ -849,11 +849,9 @@ static int determine_mtr(struct i5400_pvt *pvt, int csrow, int channel) int n; /* There is one MTR for each slot pair of FB-DIMMs, - Each slot may have one or two ranks (2 csrows), Each slot pair may be at branch 0 or branch 1. 
- So, csrow should be divided by eight */ - n = csrow >> 3; + n = csrow; if (n >= NUM_MTRS_PER_BRANCH) { debugf0("ERROR: trying to access an invalid csrow: %d\n", @@ -905,25 +903,22 @@ static void handle_channel(struct i5400_pvt *pvt, int csrow, int channel, amb_present_reg = determine_amb_present_reg(pvt, channel); /* Determine if there is a DIMM present in this DIMM slot */ - if (amb_present_reg & (1 << (csrow >> 1))) { - dinfo->dual_rank = MTR_DIMM_RANK(mtr); - - if (!((dinfo->dual_rank == 0) && - ((csrow & 0x1) == 0x1))) { - /* Start with the number of bits for a Bank - * on the DRAM */ - addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr); - /* Add thenumber of ROW bits */ - addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr); - /* add the number of COLUMN bits */ - addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr); - - addrBits += 6; /* add 64 bits per DIMM */ - addrBits -= 20; /* divide by 2^^20 */ - addrBits -= 3; /* 8 bits per bytes */ - - dinfo->megabytes = 1 << addrBits; - } + if (amb_present_reg & (1 << csrow)) { + /* Start with the number of bits for a Bank + * on the DRAM */ + addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr); + /* Add thenumber of ROW bits */ + addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr); + /* add the number of COLUMN bits */ + addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr); + /* add the number of RANK bits */ + addrBits += MTR_DIMM_RANK(mtr); + + addrBits += 6; /* add 64 bits per DIMM */ + addrBits -= 20; /* divide by 2^^20 */ + addrBits -= 3; /* 8 bits per bytes */ + + dinfo->megabytes = 1 << addrBits; } } } @@ -951,12 +946,12 @@ static void calculate_dimm_size(struct i5400_pvt *pvt) return; } - /* Scan all the actual CSROWS (which is # of DIMMS * 2) + /* Scan all the actual CSROWS * and calculate the information for each DIMM * Start with the highest csrow first, to display it first * and work toward the 0th csrow */ - max_csrows = pvt->maxdimmperch * 2; + max_csrows = pvt->maxdimmperch; for (csrow = max_csrows - 1; csrow >= 0; csrow--) { /* on an odd csrow, first output a 'boundary' marker, @@ -1064,7 +1059,7 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci) /* Get the set of MTR[0-3] regs by each branch */ for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++) { - int where = MTR0 + (slot_row * sizeof(u32)); + int where = MTR0 + (slot_row * sizeof(u16)); /* Branch 0 set of MTR registers */ pci_read_config_word(pvt->branch_0, where, @@ -1146,7 +1141,7 @@ static int i5400_init_csrows(struct mem_ctl_info *mci) pvt = mci->pvt_info; channel_count = pvt->maxch; - max_csrows = pvt->maxdimmperch * 2; + max_csrows = pvt->maxdimmperch; empty = 1; /* Assume NO memory */ @@ -1215,28 +1210,6 @@ static void i5400_enable_error_reporting(struct mem_ctl_info *mci) } /* - * i5400_get_dimm_and_channel_counts(pdev, &num_csrows, &num_channels) - * - * ask the device how many channels are present and how many CSROWS - * as well - */ -static void i5400_get_dimm_and_channel_counts(struct pci_dev *pdev, - int *num_dimms_per_channel, - int *num_channels) -{ - u8 value; - - /* Need to retrieve just how many channels and dimms per channel are - * supported on this memory controller - */ - pci_read_config_byte(pdev, MAXDIMMPERCH, &value); - *num_dimms_per_channel = (int)value * 2; - - pci_read_config_byte(pdev, MAXCH, &value); - *num_channels = (int)value; -} - -/* * i5400_probe1 Probe for ONE instance of device to see if it is * present. 
* return: @@ -1263,22 +1236,16 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx) if (PCI_FUNC(pdev->devfn) != 0) return -ENODEV; - /* Ask the devices for the number of CSROWS and CHANNELS so - * that we can calculate the memory resources, etc - * - * The Chipset will report what it can handle which will be greater - * or equal to what the motherboard manufacturer will implement. - * - * As we don't have a motherboard identification routine to determine + /* As we don't have a motherboard identification routine to determine * actual number of slots/dimms per channel, we thus utilize the * resource as specified by the chipset. Thus, we might have * have more DIMMs per channel than actually on the mobo, but this * allows the driver to support upto the chipset max, without * some fancy mobo determination. */ - i5400_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel, - &num_channels); - num_csrows = num_dimms_per_channel * 2; + num_dimms_per_channel = MAX_DIMMS_PER_CHANNEL; + num_channels = MAX_CHANNELS; + num_csrows = num_dimms_per_channel; debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n", __func__, num_channels, num_dimms_per_channel, num_csrows); diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c index 157f6504f25..cf27402af97 100644 --- a/drivers/edac/mpc85xx_edac.c +++ b/drivers/edac/mpc85xx_edac.c @@ -26,7 +26,9 @@ #include "mpc85xx_edac.h" static int edac_dev_idx; +#ifdef CONFIG_PCI static int edac_pci_idx; +#endif static int edac_mc_idx; static u32 orig_ddr_err_disable; diff --git a/drivers/gpio/twl4030-gpio.c b/drivers/gpio/twl4030-gpio.c index afad1479214..49384a7c549 100644 --- a/drivers/gpio/twl4030-gpio.c +++ b/drivers/gpio/twl4030-gpio.c @@ -460,7 +460,8 @@ no_irqs: return ret; } -static int __devexit gpio_twl4030_remove(struct platform_device *pdev) +/* Cannot use __devexit as gpio_twl4030_probe() calls us */ +static int gpio_twl4030_remove(struct platform_device *pdev) { struct twl4030_gpio_platform_data *pdata = pdev->dev.platform_data; int status; @@ -493,7 +494,7 @@ static struct platform_driver gpio_twl4030_driver = { .driver.name = "twl4030_gpio", .driver.owner = THIS_MODULE, .probe = gpio_twl4030_probe, - .remove = __devexit_p(gpio_twl4030_remove), + .remove = gpio_twl4030_remove, }; static int __init gpio_twl4030_init(void) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 3c0d2b3aed7..cea665d86dd 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -626,6 +626,12 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, return NULL; } + /* it is incorrect if hsync/vsync width is zero */ + if (!hsync_pulse_width || !vsync_pulse_width) { + DRM_DEBUG_KMS("Incorrect Detailed timing. " + "Wrong Hsync/Vsync pulse width\n"); + return NULL; + } mode = drm_mode_create(dev); if (!mode) return NULL; @@ -647,6 +653,15 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, mode->vsync_end = mode->vsync_start + vsync_pulse_width; mode->vtotal = mode->vdisplay + vblank; + /* perform the basic check for the detailed timing */ + if (mode->hsync_end > mode->htotal || + mode->vsync_end > mode->vtotal) { + drm_mode_destroy(dev, mode); + DRM_DEBUG_KMS("Incorrect detailed timing. 
" + "Sync is beyond the blank.\n"); + return NULL; + } + drm_mode_set_name(mode); if (pt->misc & DRM_EDID_PT_INTERLACED) diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 23dc9c115fd..9c924614c41 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -454,22 +454,39 @@ out_free: } EXPORT_SYMBOL(drm_fb_helper_init_crtc_count); -static void setcolreg(struct drm_crtc *crtc, u16 red, u16 green, +static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, u16 regno, struct fb_info *info) { struct drm_fb_helper *fb_helper = info->par; struct drm_framebuffer *fb = fb_helper->fb; int pindex; + if (info->fix.visual == FB_VISUAL_TRUECOLOR) { + u32 *palette; + u32 value; + /* place color in psuedopalette */ + if (regno > 16) + return -EINVAL; + palette = (u32 *)info->pseudo_palette; + red >>= (16 - info->var.red.length); + green >>= (16 - info->var.green.length); + blue >>= (16 - info->var.blue.length); + value = (red << info->var.red.offset) | + (green << info->var.green.offset) | + (blue << info->var.blue.offset); + palette[regno] = value; + return 0; + } + pindex = regno; if (fb->bits_per_pixel == 16) { pindex = regno << 3; if (fb->depth == 16 && regno > 63) - return; + return -EINVAL; if (fb->depth == 15 && regno > 31) - return; + return -EINVAL; if (fb->depth == 16) { u16 r, g, b; @@ -493,13 +510,7 @@ static void setcolreg(struct drm_crtc *crtc, u16 red, u16 green, if (fb->depth != 16) fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex); - - if (regno < 16 && info->fix.visual == FB_VISUAL_DIRECTCOLOR) { - ((u32 *) fb->pseudo_palette)[regno] = - (regno << info->var.red.offset) | - (regno << info->var.green.offset) | - (regno << info->var.blue.offset); - } + return 0; } int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) @@ -536,7 +547,9 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) if (transp) htransp = *transp++; - setcolreg(crtc, hred, hgreen, hblue, start++, info); + rc = setcolreg(crtc, hred, hgreen, hblue, start++, info); + if (rc) + return rc; } crtc_funcs->load_lut(crtc); } @@ -555,6 +568,7 @@ int drm_fb_helper_setcolreg(unsigned regno, struct drm_device *dev = fb_helper->dev; struct drm_crtc *crtc; int i; + int ret; if (regno > 255) return 1; @@ -568,8 +582,10 @@ int drm_fb_helper_setcolreg(unsigned regno, if (i == fb_helper->crtc_count) continue; + ret = setcolreg(crtc, red, green, blue, regno, info); + if (ret) + return ret; - setcolreg(crtc, red, green, blue, regno, info); crtc_funcs->load_lut(crtc); } return 0; @@ -928,7 +944,7 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, { info->fix.type = FB_TYPE_PACKED_PIXELS; info->fix.visual = depth == 8 ? 
FB_VISUAL_PSEUDOCOLOR : - FB_VISUAL_DIRECTCOLOR; + FB_VISUAL_TRUECOLOR; info->fix.type_aux = 0; info->fix.xpanstep = 1; /* doing it in hw */ info->fix.ypanstep = 1; /* doing it in hw */ diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index b93814c0d3e..7f436ec075f 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -89,7 +89,8 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state) pci_set_power_state(dev->pdev, PCI_D3hot); } - dev_priv->suspended = 1; + /* Modeset on resume, not lid events */ + dev_priv->modeset_on_lid = 0; return 0; } @@ -124,7 +125,7 @@ static int i915_resume(struct drm_device *dev) drm_helper_resume_force_mode(dev); } - dev_priv->suspended = 0; + dev_priv->modeset_on_lid = 0; return ret; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 6035d3dae85..c5df2234418 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -274,7 +274,7 @@ typedef struct drm_i915_private { struct drm_i915_display_funcs display; /* Register state */ - bool suspended; + bool modeset_on_lid; u8 saveLBB; u32 saveDSPACNTR; u32 saveDSPBCNTR; diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 98ae3d73577..808bbe412ba 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -656,6 +656,15 @@ static int intel_lvds_get_modes(struct drm_connector *connector) return 0; } +/* + * Lid events. Note the use of 'modeset_on_lid': + * - we set it on lid close, and reset it on open + * - we use it as a "only once" bit (ie we ignore + * duplicate events where it was already properly + * set/reset) + * - the suspend/resume paths will also set it to + * zero, since they restore the mode ("lid open"). 
+ */ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, void *unused) { @@ -663,13 +672,19 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, container_of(nb, struct drm_i915_private, lid_notifier); struct drm_device *dev = dev_priv->dev; - if (acpi_lid_open() && !dev_priv->suspended) { - mutex_lock(&dev->mode_config.mutex); - drm_helper_resume_force_mode(dev); - mutex_unlock(&dev->mode_config.mutex); + if (!acpi_lid_open()) { + dev_priv->modeset_on_lid = 1; + return NOTIFY_OK; } - drm_sysfs_hotplug_event(dev_priv->dev); + if (!dev_priv->modeset_on_lid) + return NOTIFY_OK; + + dev_priv->modeset_on_lid = 0; + + mutex_lock(&dev->mode_config.mutex); + drm_helper_resume_force_mode(dev); + mutex_unlock(&dev->mode_config.mutex); return NOTIFY_OK; } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 3d667031de6..df988142e6b 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -582,10 +582,9 @@ int radeon_device_init(struct radeon_device *rdev, DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size); /* if we have > 1 VGA cards, then disable the radeon VGA resources */ - r = vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); - if (r) { - return -EINVAL; - } + /* this will fail for cards that aren't VGA class devices, just + * ignore it */ + vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); r = radeon_init(rdev); if (r) diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c index 2c2cb1ec94c..27d62574284 100644 --- a/drivers/hwmon/dme1737.c +++ b/drivers/hwmon/dme1737.c @@ -572,7 +572,7 @@ static struct dme1737_data *dme1737_update_device(struct device *dev) /* Sample register contents every 1 sec */ if (time_after(jiffies, data->last_update + HZ) || !data->valid) { - if (data->type != sch5027) { + if (data->type == dme1737) { data->vid = dme1737_read(data, DME1737_REG_VID) & 0x3f; } @@ -1621,9 +1621,6 @@ static struct attribute *dme1737_misc_attr[] = { &sensor_dev_attr_zone1_auto_point1_temp_hyst.dev_attr.attr, &sensor_dev_attr_zone2_auto_point1_temp_hyst.dev_attr.attr, &sensor_dev_attr_zone3_auto_point1_temp_hyst.dev_attr.attr, - /* Misc */ - &dev_attr_vrm.attr, - &dev_attr_cpu0_vid.attr, NULL }; @@ -1631,6 +1628,18 @@ static const struct attribute_group dme1737_misc_group = { .attrs = dme1737_misc_attr, }; +/* The following struct holds VID-related attributes. Their creation + depends on the chip type which is determined during module load. */ +static struct attribute *dme1737_vid_attr[] = { + &dev_attr_vrm.attr, + &dev_attr_cpu0_vid.attr, + NULL +}; + +static const struct attribute_group dme1737_vid_group = { + .attrs = dme1737_vid_attr, +}; + /* The following structs hold the PWM attributes, some of which are optional. * Their creation depends on the chip configuration which is determined during * module load. 
*/ @@ -1902,6 +1911,9 @@ static void dme1737_remove_files(struct device *dev) if (data->type != sch5027) { sysfs_remove_group(&dev->kobj, &dme1737_misc_group); } + if (data->type == dme1737) { + sysfs_remove_group(&dev->kobj, &dme1737_vid_group); + } sysfs_remove_group(&dev->kobj, &dme1737_group); @@ -1933,6 +1945,13 @@ static int dme1737_create_files(struct device *dev) goto exit_remove; } + /* Create VID-related sysfs attributes */ + if ((data->type == dme1737) && + (err = sysfs_create_group(&dev->kobj, + &dme1737_vid_group))) { + goto exit_remove; + } + /* Create fan sysfs attributes */ for (ix = 0; ix < ARRAY_SIZE(dme1737_fan_group); ix++) { if (data->has_fan & (1 << ix)) { @@ -2127,7 +2146,7 @@ static int dme1737_init_device(struct device *dev) data->pwm_acz[2] = 4; /* pwm3 -> zone3 */ /* Set VRM */ - if (data->type != sch5027) { + if (data->type == dme1737) { data->vrm = vid_which_vrm(); } diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c index 2a7a85a6dc3..da1b1f9488a 100644 --- a/drivers/hwmon/fschmd.c +++ b/drivers/hwmon/fschmd.c @@ -819,7 +819,7 @@ static int watchdog_release(struct inode *inode, struct file *filp) static ssize_t watchdog_write(struct file *filp, const char __user *buf, size_t count, loff_t *offset) { - size_t ret; + int ret; struct fschmd_data *data = filp->private_data; if (count) { diff --git a/drivers/hwmon/hp_accel.c b/drivers/hwmon/hp_accel.c index 6679854c85b..be475e844c2 100644 --- a/drivers/hwmon/hp_accel.c +++ b/drivers/hwmon/hp_accel.c @@ -197,11 +197,13 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = { AXIS_DMI_MATCH("HP2133", "HP 2133", xy_rotated_left), AXIS_DMI_MATCH("HP2140", "HP 2140", xy_swap_inverted), AXIS_DMI_MATCH("NC653x", "HP Compaq 653", xy_rotated_left_usd), - AXIS_DMI_MATCH("NC673x", "HP Compaq 673", xy_rotated_left_usd), + AXIS_DMI_MATCH("NC6730b", "HP Compaq 6730b", xy_rotated_left_usd), + AXIS_DMI_MATCH("NC6730s", "HP Compaq 6730s", xy_swap), AXIS_DMI_MATCH("NC651xx", "HP Compaq 651", xy_rotated_right), AXIS_DMI_MATCH("NC6710x", "HP Compaq 6710", xy_swap_yz_inverted), AXIS_DMI_MATCH("NC6715x", "HP Compaq 6715", y_inverted), AXIS_DMI_MATCH("NC693xx", "HP EliteBook 693", xy_rotated_right), + AXIS_DMI_MATCH("NC693xx", "HP EliteBook 853", xy_swap), /* Intel-based HP Pavilion dv5 */ AXIS_DMI_MATCH2("HPDV5_I", PRODUCT_NAME, "HP Pavilion dv5", @@ -214,6 +216,7 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = { y_inverted), AXIS_DMI_MATCH("DV7", "HP Pavilion dv7", x_inverted), AXIS_DMI_MATCH("HP8710", "HP Compaq 8710", y_inverted), + AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted), { NULL, } /* Laptop models without axis info (yet): * "NC6910" "HP Compaq 6910" diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c index ffeb2a10e1a..a3749cb0f18 100644 --- a/drivers/hwmon/it87.c +++ b/drivers/hwmon/it87.c @@ -1028,12 +1028,11 @@ static int __init it87_find(unsigned short *address, chip_type, *address, sio_data->revision); /* Read GPIO config and VID value from LDN 7 (GPIO) */ - if (chip_type != IT8705F_DEVID) { + if (sio_data->type != it87) { int reg; superio_select(GPIO); - if ((chip_type == it8718) || - (chip_type == it8720)) + if (sio_data->type == it8718 || sio_data->type == it8720) sio_data->vid_value = superio_inb(IT87_SIO_VID_REG); reg = superio_inb(IT87_SIO_PINX2_REG); diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 4afba3ec2a6..e3654d683e1 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -120,19 +120,26 @@ struct imx_i2c_struct { wait_queue_head_t 
queue; unsigned long i2csr; unsigned int disable_delay; + int stopped; + unsigned int ifdr; /* IMX_I2C_IFDR */ }; /** Functions for IMX I2C adapter driver *************************************** *******************************************************************************/ -static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx) +static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy) { unsigned long orig_jiffies = jiffies; + unsigned int temp; dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); - /* wait for bus not busy */ - while (readb(i2c_imx->base + IMX_I2C_I2SR) & I2SR_IBB) { + while (1) { + temp = readb(i2c_imx->base + IMX_I2C_I2SR); + if (for_busy && (temp & I2SR_IBB)) + break; + if (!for_busy && !(temp & I2SR_IBB)) + break; if (signal_pending(current)) { dev_dbg(&i2c_imx->adapter.dev, "<%s> I2C Interrupted\n", __func__); @@ -179,41 +186,62 @@ static int i2c_imx_acked(struct imx_i2c_struct *i2c_imx) return 0; } -static void i2c_imx_start(struct imx_i2c_struct *i2c_imx) +static int i2c_imx_start(struct imx_i2c_struct *i2c_imx) { unsigned int temp = 0; + int result; dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); + clk_enable(i2c_imx->clk); + writeb(i2c_imx->ifdr, i2c_imx->base + IMX_I2C_IFDR); /* Enable I2C controller */ + writeb(0, i2c_imx->base + IMX_I2C_I2SR); writeb(I2CR_IEN, i2c_imx->base + IMX_I2C_I2CR); + + /* Wait controller to be stable */ + udelay(50); + /* Start I2C transaction */ temp = readb(i2c_imx->base + IMX_I2C_I2CR); temp |= I2CR_MSTA; writeb(temp, i2c_imx->base + IMX_I2C_I2CR); + result = i2c_imx_bus_busy(i2c_imx, 1); + if (result) + return result; + i2c_imx->stopped = 0; + temp |= I2CR_IIEN | I2CR_MTX | I2CR_TXAK; writeb(temp, i2c_imx->base + IMX_I2C_I2CR); + return result; } static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx) { unsigned int temp = 0; - /* Stop I2C transaction */ - dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); - temp = readb(i2c_imx->base + IMX_I2C_I2CR); - temp &= ~I2CR_MSTA; - writeb(temp, i2c_imx->base + IMX_I2C_I2CR); - /* setup chip registers to defaults */ - writeb(I2CR_IEN, i2c_imx->base + IMX_I2C_I2CR); - writeb(0, i2c_imx->base + IMX_I2C_I2SR); - /* - * This delay caused by an i.MXL hardware bug. - * If no (or too short) delay, no "STOP" bit will be generated. - */ - udelay(i2c_imx->disable_delay); + if (!i2c_imx->stopped) { + /* Stop I2C transaction */ + dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); + temp = readb(i2c_imx->base + IMX_I2C_I2CR); + temp &= ~(I2CR_MSTA | I2CR_MTX); + writeb(temp, i2c_imx->base + IMX_I2C_I2CR); + i2c_imx->stopped = 1; + } + if (cpu_is_mx1()) { + /* + * This delay caused by an i.MXL hardware bug. + * If no (or too short) delay, no "STOP" bit will be generated. + */ + udelay(i2c_imx->disable_delay); + } + + if (!i2c_imx->stopped) + i2c_imx_bus_busy(i2c_imx, 0); + /* Disable I2C controller */ writeb(0, i2c_imx->base + IMX_I2C_I2CR); + clk_disable(i2c_imx->clk); } static void __init i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx, @@ -233,8 +261,8 @@ static void __init i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx, else for (i = 0; i2c_clk_div[i][0] < div; i++); - /* Write divider value to register */ - writeb(i2c_clk_div[i][1], i2c_imx->base + IMX_I2C_IFDR); + /* Store divider value */ + i2c_imx->ifdr = i2c_clk_div[i][1]; /* * There dummy delay is calculated. 
@@ -341,11 +369,15 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs) if (result) return result; if (i == (msgs->len - 1)) { + /* It must generate STOP before read I2DR to prevent + controller from generating another clock cycle */ dev_dbg(&i2c_imx->adapter.dev, "<%s> clear MSTA\n", __func__); temp = readb(i2c_imx->base + IMX_I2C_I2CR); - temp &= ~I2CR_MSTA; + temp &= ~(I2CR_MSTA | I2CR_MTX); writeb(temp, i2c_imx->base + IMX_I2C_I2CR); + i2c_imx_bus_busy(i2c_imx, 0); + i2c_imx->stopped = 1; } else if (i == (msgs->len - 2)) { dev_dbg(&i2c_imx->adapter.dev, "<%s> set TXAK\n", __func__); @@ -370,14 +402,11 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter, dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); - /* Check if i2c bus is not busy */ - result = i2c_imx_bus_busy(i2c_imx); + /* Start I2C transfer */ + result = i2c_imx_start(i2c_imx); if (result) goto fail0; - /* Start I2C transfer */ - i2c_imx_start(i2c_imx); - /* read/write data */ for (i = 0; i < num; i++) { if (i) { @@ -386,6 +415,9 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter, temp = readb(i2c_imx->base + IMX_I2C_I2CR); temp |= I2CR_RSTA; writeb(temp, i2c_imx->base + IMX_I2C_I2CR); + result = i2c_imx_bus_busy(i2c_imx, 1); + if (result) + goto fail0; } dev_dbg(&i2c_imx->adapter.dev, "<%s> transfer message: %d\n", __func__, i); @@ -500,7 +532,6 @@ static int __init i2c_imx_probe(struct platform_device *pdev) dev_err(&pdev->dev, "can't get I2C clock\n"); goto fail3; } - clk_enable(i2c_imx->clk); /* Request IRQ */ ret = request_irq(i2c_imx->irq, i2c_imx_isr, 0, pdev->name, i2c_imx); @@ -549,7 +580,6 @@ static int __init i2c_imx_probe(struct platform_device *pdev) fail5: free_irq(i2c_imx->irq, i2c_imx); fail4: - clk_disable(i2c_imx->clk); clk_put(i2c_imx->clk); fail3: release_mem_region(i2c_imx->res->start, resource_size(res)); @@ -586,8 +616,6 @@ static int __exit i2c_imx_remove(struct platform_device *pdev) if (pdata && pdata->exit) pdata->exit(&pdev->dev); - /* Disable I2C clock */ - clk_disable(i2c_imx->clk); clk_put(i2c_imx->clk); release_mem_region(i2c_imx->res->start, resource_size(i2c_imx->res)); diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c index d325e86e310..f627001108b 100644 --- a/drivers/i2c/busses/i2c-mpc.c +++ b/drivers/i2c/busses/i2c-mpc.c @@ -365,9 +365,6 @@ static int mpc_write(struct mpc_i2c *i2c, int target, unsigned timeout = i2c->adap.timeout; u32 flags = restart ? CCR_RSTA : 0; - /* Start with MEN */ - if (!restart) - writeccr(i2c, CCR_MEN); /* Start as master */ writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA | CCR_MTX | flags); /* Write target byte */ @@ -396,9 +393,6 @@ static int mpc_read(struct mpc_i2c *i2c, int target, int i, result; u32 flags = restart ? 
CCR_RSTA : 0; - /* Start with MEN */ - if (!restart) - writeccr(i2c, CCR_MEN); /* Switch to read - restart */ writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA | CCR_MTX | flags); /* Write target address byte - this time with the read flag set */ @@ -425,9 +419,9 @@ static int mpc_read(struct mpc_i2c *i2c, int target, /* Generate txack on next to last byte */ if (i == length - 2) writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA | CCR_TXAK); - /* Generate stop on last byte */ + /* Do not generate stop on last byte */ if (i == length - 1) - writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_TXAK); + writeccr(i2c, CCR_MIEN | CCR_MEN | CCR_MSTA | CCR_MTX); data[i] = readb(i2c->base + MPC_I2C_DR); } diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c index 6396c3ad325..837322b10a4 100644 --- a/drivers/ide/atiixp.c +++ b/drivers/ide/atiixp.c @@ -177,7 +177,7 @@ static const struct pci_device_id atiixp_pci_tbl[] = { { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), 0 }, { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), 1 }, { PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), 0 }, - { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_SB900_IDE), 0 }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_HUDSON2_IDE), 0 }, { 0, }, }; MODULE_DEVICE_TABLE(pci, atiixp_pci_tbl); diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c index 680e5975217..ca0c46f6580 100644 --- a/drivers/ide/cmd64x.c +++ b/drivers/ide/cmd64x.c @@ -379,7 +379,8 @@ static const struct ide_port_info cmd64x_chipsets[] __devinitdata = { .enablebits = {{0x00,0x00,0x00}, {0x51,0x08,0x08}}, .port_ops = &cmd64x_port_ops, .host_flags = IDE_HFLAG_CLEAR_SIMPLEX | - IDE_HFLAG_ABUSE_PREFETCH, + IDE_HFLAG_ABUSE_PREFETCH | + IDE_HFLAG_SERIALIZE, .pio_mask = ATA_PIO5, .mwdma_mask = ATA_MWDMA2, .udma_mask = 0x00, /* no udma */ @@ -389,7 +390,8 @@ static const struct ide_port_info cmd64x_chipsets[] __devinitdata = { .init_chipset = init_chipset_cmd64x, .enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}}, .port_ops = &cmd648_port_ops, - .host_flags = IDE_HFLAG_ABUSE_PREFETCH, + .host_flags = IDE_HFLAG_ABUSE_PREFETCH | + IDE_HFLAG_SERIALIZE, .pio_mask = ATA_PIO5, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA2, diff --git a/drivers/input/input.c b/drivers/input/input.c index c6f88ebb40c..cc763c96fad 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c @@ -782,10 +782,29 @@ static unsigned int input_proc_devices_poll(struct file *file, poll_table *wait) return 0; } +union input_seq_state { + struct { + unsigned short pos; + bool mutex_acquired; + }; + void *p; +}; + static void *input_devices_seq_start(struct seq_file *seq, loff_t *pos) { - if (mutex_lock_interruptible(&input_mutex)) - return NULL; + union input_seq_state *state = (union input_seq_state *)&seq->private; + int error; + + /* We need to fit into seq->private pointer */ + BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private)); + + error = mutex_lock_interruptible(&input_mutex); + if (error) { + state->mutex_acquired = false; + return ERR_PTR(error); + } + + state->mutex_acquired = true; return seq_list_start(&input_dev_list, *pos); } @@ -795,9 +814,12 @@ static void *input_devices_seq_next(struct seq_file *seq, void *v, loff_t *pos) return seq_list_next(v, &input_dev_list, pos); } -static void input_devices_seq_stop(struct seq_file *seq, void *v) +static void input_seq_stop(struct seq_file *seq, void *v) { - mutex_unlock(&input_mutex); + union input_seq_state *state = (union input_seq_state *)&seq->private; + + if (state->mutex_acquired) + mutex_unlock(&input_mutex); } static void 
input_seq_print_bitmap(struct seq_file *seq, const char *name, @@ -861,7 +883,7 @@ static int input_devices_seq_show(struct seq_file *seq, void *v) static const struct seq_operations input_devices_seq_ops = { .start = input_devices_seq_start, .next = input_devices_seq_next, - .stop = input_devices_seq_stop, + .stop = input_seq_stop, .show = input_devices_seq_show, }; @@ -881,40 +903,49 @@ static const struct file_operations input_devices_fileops = { static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos) { - if (mutex_lock_interruptible(&input_mutex)) - return NULL; + union input_seq_state *state = (union input_seq_state *)&seq->private; + int error; + + /* We need to fit into seq->private pointer */ + BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private)); + + error = mutex_lock_interruptible(&input_mutex); + if (error) { + state->mutex_acquired = false; + return ERR_PTR(error); + } + + state->mutex_acquired = true; + state->pos = *pos; - seq->private = (void *)(unsigned long)*pos; return seq_list_start(&input_handler_list, *pos); } static void *input_handlers_seq_next(struct seq_file *seq, void *v, loff_t *pos) { - seq->private = (void *)(unsigned long)(*pos + 1); - return seq_list_next(v, &input_handler_list, pos); -} + union input_seq_state *state = (union input_seq_state *)&seq->private; -static void input_handlers_seq_stop(struct seq_file *seq, void *v) -{ - mutex_unlock(&input_mutex); + state->pos = *pos + 1; + return seq_list_next(v, &input_handler_list, pos); } static int input_handlers_seq_show(struct seq_file *seq, void *v) { struct input_handler *handler = container_of(v, struct input_handler, node); + union input_seq_state *state = (union input_seq_state *)&seq->private; - seq_printf(seq, "N: Number=%ld Name=%s", - (unsigned long)seq->private, handler->name); + seq_printf(seq, "N: Number=%u Name=%s", state->pos, handler->name); if (handler->fops) seq_printf(seq, " Minor=%d", handler->minor); seq_putc(seq, '\n'); return 0; } + static const struct seq_operations input_handlers_seq_ops = { .start = input_handlers_seq_start, .next = input_handlers_seq_next, - .stop = input_handlers_seq_stop, + .stop = input_seq_stop, .show = input_handlers_seq_show, }; diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c index 4709e15af60..4452eabbee6 100644 --- a/drivers/input/keyboard/atkbd.c +++ b/drivers/input/keyboard/atkbd.c @@ -233,6 +233,7 @@ struct atkbd { */ static void (*atkbd_platform_fixup)(struct atkbd *, const void *data); static void *atkbd_platform_fixup_data; +static unsigned int (*atkbd_platform_scancode_fixup)(struct atkbd *, unsigned int); static ssize_t atkbd_attr_show_helper(struct device *dev, char *buf, ssize_t (*handler)(struct atkbd *, char *)); @@ -393,6 +394,9 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data, input_event(dev, EV_MSC, MSC_RAW, code); + if (atkbd_platform_scancode_fixup) + code = atkbd_platform_scancode_fixup(atkbd, code); + if (atkbd->translated) { if (atkbd->emul || atkbd_need_xlate(atkbd->xl_bit, code)) { @@ -574,11 +578,22 @@ static void atkbd_event_work(struct work_struct *work) mutex_lock(&atkbd->event_mutex); - if (test_and_clear_bit(ATKBD_LED_EVENT_BIT, &atkbd->event_mask)) - atkbd_set_leds(atkbd); + if (!atkbd->enabled) { + /* + * Serio ports are resumed asynchronously so while driver core + * thinks that device is already fully operational in reality + * it may not be ready yet. In this case we need to keep + * rescheduling till reconnect completes. 
+ */ + schedule_delayed_work(&atkbd->event_work, + msecs_to_jiffies(100)); + } else { + if (test_and_clear_bit(ATKBD_LED_EVENT_BIT, &atkbd->event_mask)) + atkbd_set_leds(atkbd); - if (test_and_clear_bit(ATKBD_REP_EVENT_BIT, &atkbd->event_mask)) - atkbd_set_repeat_rate(atkbd); + if (test_and_clear_bit(ATKBD_REP_EVENT_BIT, &atkbd->event_mask)) + atkbd_set_repeat_rate(atkbd); + } mutex_unlock(&atkbd->event_mutex); } @@ -770,6 +785,30 @@ static int atkbd_select_set(struct atkbd *atkbd, int target_set, int allow_extra return 3; } +static int atkbd_reset_state(struct atkbd *atkbd) +{ + struct ps2dev *ps2dev = &atkbd->ps2dev; + unsigned char param[1]; + +/* + * Set the LEDs to a predefined state (all off). + */ + + param[0] = 0; + if (ps2_command(ps2dev, param, ATKBD_CMD_SETLEDS)) + return -1; + +/* + * Set autorepeat to fastest possible. + */ + + param[0] = 0; + if (ps2_command(ps2dev, param, ATKBD_CMD_SETREP)) + return -1; + + return 0; +} + static int atkbd_activate(struct atkbd *atkbd) { struct ps2dev *ps2dev = &atkbd->ps2dev; @@ -852,29 +891,6 @@ static unsigned int atkbd_hp_forced_release_keys[] = { }; /* - * Inventec system with broken key release on volume keys - */ -static unsigned int atkbd_inventec_forced_release_keys[] = { - 0xae, 0xb0, -1U -}; - -/* - * Perform fixup for HP Pavilion ZV6100 laptop that doesn't generate release - * for its volume buttons - */ -static unsigned int atkbd_hp_zv6100_forced_release_keys[] = { - 0xae, 0xb0, -1U -}; - -/* - * Perform fixup for HP (Compaq) Presario R4000 R4100 R4200 that don't generate - * release for their volume buttons - */ -static unsigned int atkbd_hp_r4000_forced_release_keys[] = { - 0xae, 0xb0, -1U -}; - -/* * Samsung NC10,NC20 with Fn+F? key release not working */ static unsigned int atkbd_samsung_forced_release_keys[] = { @@ -882,14 +898,6 @@ static unsigned int atkbd_samsung_forced_release_keys[] = { }; /* - * The volume up and volume down special keys on a Fujitsu Amilo PA 1510 laptop - * do not generate release events so we have to do it ourselves. - */ -static unsigned int atkbd_amilo_pa1510_forced_release_keys[] = { - 0xb0, 0xae, -1U -}; - -/* * Amilo Pi 3525 key release for Fn+Volume keys not working */ static unsigned int atkbd_amilo_pi3525_forced_release_keys[] = { @@ -911,6 +919,30 @@ static unsigned int atkdb_soltech_ta12_forced_release_keys[] = { }; /* + * Many notebooks don't send key release event for volume up/down + * keys, with key list below common among them + */ +static unsigned int atkbd_volume_forced_release_keys[] = { + 0xae, 0xb0, -1U +}; + +/* + * OQO 01+ multimedia keys (64--66) generate e0 6x upon release whereas + * they should be generating e4-e6 (0x80 | code). 
+ */ +static unsigned int atkbd_oqo_01plus_scancode_fixup(struct atkbd *atkbd, + unsigned int code) +{ + if (atkbd->translated && atkbd->emul == 1 && + (code == 0x64 || code == 0x65 || code == 0x66)) { + atkbd->emul = 0; + code |= 0x80; + } + + return code; +} + +/* * atkbd_set_keycode_table() initializes keyboard's keycode table * according to the selected scancode set */ @@ -1087,6 +1119,7 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv) } atkbd->set = atkbd_select_set(atkbd, atkbd_set, atkbd_extra); + atkbd_reset_state(atkbd); atkbd_activate(atkbd); } else { @@ -1267,6 +1300,7 @@ static ssize_t atkbd_set_extra(struct atkbd *atkbd, const char *buf, size_t coun atkbd->dev = new_dev; atkbd->set = atkbd_select_set(atkbd, atkbd->set, value); + atkbd_reset_state(atkbd); atkbd_activate(atkbd); atkbd_set_keycode_table(atkbd); atkbd_set_device_attrs(atkbd); @@ -1513,6 +1547,13 @@ static int __init atkbd_setup_forced_release(const struct dmi_system_id *id) return 0; } +static int __init atkbd_setup_scancode_fixup(const struct dmi_system_id *id) +{ + atkbd_platform_scancode_fixup = id->driver_data; + + return 0; +} + static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = { { .ident = "Dell Laptop", @@ -1548,7 +1589,7 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = { DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion ZV6100"), }, .callback = atkbd_setup_forced_release, - .driver_data = atkbd_hp_zv6100_forced_release_keys, + .driver_data = atkbd_volume_forced_release_keys, }, { .ident = "HP Presario R4000", @@ -1557,7 +1598,7 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = { DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4000"), }, .callback = atkbd_setup_forced_release, - .driver_data = atkbd_hp_r4000_forced_release_keys, + .driver_data = atkbd_volume_forced_release_keys, }, { .ident = "HP Presario R4100", @@ -1566,7 +1607,7 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = { DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4100"), }, .callback = atkbd_setup_forced_release, - .driver_data = atkbd_hp_r4000_forced_release_keys, + .driver_data = atkbd_volume_forced_release_keys, }, { .ident = "HP Presario R4200", @@ -1575,7 +1616,7 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = { DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4200"), }, .callback = atkbd_setup_forced_release, - .driver_data = atkbd_hp_r4000_forced_release_keys, + .driver_data = atkbd_volume_forced_release_keys, }, { .ident = "Inventec Symphony", @@ -1584,7 +1625,7 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = { DMI_MATCH(DMI_PRODUCT_NAME, "SYMPHONY 6.0/7.0"), }, .callback = atkbd_setup_forced_release, - .driver_data = atkbd_inventec_forced_release_keys, + .driver_data = atkbd_volume_forced_release_keys, }, { .ident = "Samsung NC10", @@ -1620,7 +1661,7 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = { DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pa 1510"), }, .callback = atkbd_setup_forced_release, - .driver_data = atkbd_amilo_pa1510_forced_release_keys, + .driver_data = atkbd_volume_forced_release_keys, }, { .ident = "Fujitsu Amilo Pi 3525", @@ -1649,6 +1690,15 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = { .callback = atkbd_setup_forced_release, .driver_data = atkdb_soltech_ta12_forced_release_keys, }, + { + .ident = "OQO Model 01+", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "OQO"), + DMI_MATCH(DMI_PRODUCT_NAME, "ZEPTO"), + }, + .callback = atkbd_setup_scancode_fixup, + .driver_data = 
atkbd_oqo_01plus_scancode_fixup, + }, { } }; diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c index a88aff3816a..77d13091425 100644 --- a/drivers/input/keyboard/gpio_keys.c +++ b/drivers/input/keyboard/gpio_keys.c @@ -147,6 +147,7 @@ static int __devinit gpio_keys_probe(struct platform_device *pdev) } error = request_irq(irq, gpio_keys_isr, + IRQF_SHARED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, button->desc ? button->desc : "gpio_keys", bdata); diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index 02f4f8f1db6..a9bb2544b2d 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -227,6 +227,7 @@ config INPUT_WINBOND_CIR depends on X86 && PNP select NEW_LEDS select LEDS_CLASS + select LEDS_TRIGGERS select BITREVERSE help Say Y here if you want to use the IR remote functionality found diff --git a/drivers/input/misc/hp_sdc_rtc.c b/drivers/input/misc/hp_sdc_rtc.c index 216a559f55e..ea821b54696 100644 --- a/drivers/input/misc/hp_sdc_rtc.c +++ b/drivers/input/misc/hp_sdc_rtc.c @@ -209,7 +209,7 @@ static inline int hp_sdc_rtc_read_rt(struct timeval *res) { /* Read the i8042 fast handshake timer */ static inline int hp_sdc_rtc_read_fhs(struct timeval *res) { - uint64_t raw; + int64_t raw; unsigned int tenms; raw = hp_sdc_rtc_read_i8042timer(HP_SDC_CMD_LOAD_FHS, 2); diff --git a/drivers/input/mouse/logips2pp.c b/drivers/input/mouse/logips2pp.c index de745d75116..ab5dc5f5fd8 100644 --- a/drivers/input/mouse/logips2pp.c +++ b/drivers/input/mouse/logips2pp.c @@ -219,7 +219,7 @@ static const struct ps2pp_info *get_model_info(unsigned char model) PS2PP_WHEEL | PS2PP_SIDE_BTN | PS2PP_TASK_BTN | PS2PP_EXTRA_BTN | PS2PP_NAV_BTN | PS2PP_HWHEEL }, { 72, PS2PP_KIND_TRACKMAN, 0 }, /* T-CH11: TrackMan Marble */ - { 73, 0, PS2PP_SIDE_BTN }, + { 73, PS2PP_KIND_TRACKMAN, PS2PP_SIDE_BTN }, /* TrackMan FX */ { 75, PS2PP_KIND_WHEEL, PS2PP_WHEEL }, { 76, PS2PP_KIND_WHEEL, PS2PP_WHEEL }, { 79, PS2PP_KIND_TRACKMAN, PS2PP_WHEEL }, /* TrackMan with wheel */ diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index b66ff1ac7de..f4a61252bcc 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -652,6 +652,16 @@ static const struct dmi_system_id toshiba_dmi_table[] = { DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE M300"), }, + + }, + { + .ident = "Toshiba Portege M300", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "Portable PC"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Version 1.0"), + }, + }, { } }; diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index a39bc4eb902..a537925f765 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -327,6 +327,17 @@ static struct dmi_system_id __initdata i8042_dmi_nomux_table[] = { }, }, { + /* + * Reset and GET ID commands issued via KBD port are + * sometimes being delivered to AUX3. 
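Several of the hunks above (atkbd, synaptics, and the i8042 nomux list) extend DMI quirk tables: each entry pairs DMI_MATCH() substring matches with an optional callback and an opaque driver_data pointer, and dmi_check_system(), the usual consumer of these tables, runs the callback of entries whose matches all hit. A rough userspace sketch of that dispatch shape, with purely hypothetical names and data:

#include <stdio.h>
#include <string.h>

struct quirk {
	const char *vendor;			/* substring to find in the DMI vendor */
	const char *product;			/* substring to find in the product name */
	int (*callback)(const struct quirk *q);
	const void *driver_data;
};

static const unsigned int volume_keys[] = { 0xae, 0xb0, (unsigned int)-1 };

static int setup_forced_release(const struct quirk *q)
{
	const unsigned int *keys = q->driver_data;

	printf("applying forced-release quirk, first key 0x%02x\n", keys[0]);
	return 0;
}

static const struct quirk quirks[] = {
	{ "Hewlett-Packard", "Pavilion ZV6100", setup_forced_release, volume_keys },
	{ NULL, NULL, NULL, NULL }		/* terminator, like the final { } entry */
};

static void apply_quirks(const char *vendor, const char *product)
{
	const struct quirk *q;

	for (q = quirks; q->callback; q++)
		if (strstr(vendor, q->vendor) && strstr(product, q->product))
			q->callback(q);
}

int main(void)
{
	apply_quirks("Hewlett-Packard", "Pavilion ZV6100");
	return 0;
}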
+ */ + .ident = "Sony Vaio FZ-240E", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ240E"), + }, + }, + { .ident = "Amoi M636/A737", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."), @@ -661,7 +672,7 @@ static void i8042_pnp_exit(void) static int __init i8042_pnp_init(void) { char kbd_irq_str[4] = { 0 }, aux_irq_str[4] = { 0 }; - int pnp_data_busted = false; + bool pnp_data_busted = false; int err; #ifdef CONFIG_X86 diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index a31578170cc..1df02d25aca 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -836,17 +836,32 @@ static int i8042_controller_selftest(void) static int i8042_controller_init(void) { unsigned long flags; + int n = 0; + unsigned char ctr[2]; /* - * Save the CTR for restoral on unload / reboot. + * Save the CTR for restore on unload / reboot. */ - if (i8042_command(&i8042_ctr, I8042_CMD_CTL_RCTR)) { - printk(KERN_ERR "i8042.c: Can't read CTR while initializing i8042.\n"); - return -EIO; - } + do { + if (n >= 10) { + printk(KERN_ERR + "i8042.c: Unable to get stable CTR read.\n"); + return -EIO; + } + + if (n != 0) + udelay(50); + + if (i8042_command(&ctr[n++ % 2], I8042_CMD_CTL_RCTR)) { + printk(KERN_ERR + "i8042.c: Can't read CTR while initializing i8042.\n"); + return -EIO; + } - i8042_initial_ctr = i8042_ctr; + } while (n < 2 || ctr[0] != ctr[1]); + + i8042_initial_ctr = i8042_ctr = ctr[0]; /* * Disable the keyboard interface and interrupt. @@ -895,6 +910,12 @@ static int i8042_controller_init(void) return -EIO; } +/* + * Flush whatever accumulated while we were disabling keyboard port. + */ + + i8042_flush(); + return 0; } @@ -914,7 +935,7 @@ static void i8042_controller_reset(void) i8042_ctr |= I8042_CTR_KBDDIS | I8042_CTR_AUXDIS; i8042_ctr &= ~(I8042_CTR_KBDINT | I8042_CTR_AUXINT); - if (i8042_command(&i8042_initial_ctr, I8042_CMD_CTL_WCTR)) + if (i8042_command(&i8042_ctr, I8042_CMD_CTL_WCTR)) printk(KERN_WARNING "i8042.c: Can't write CTR while resetting.\n"); /* diff --git a/drivers/isdn/i4l/isdn_net.h b/drivers/isdn/i4l/isdn_net.h index 74032d0881e..7511f08effa 100644 --- a/drivers/isdn/i4l/isdn_net.h +++ b/drivers/isdn/i4l/isdn_net.h @@ -83,19 +83,19 @@ static __inline__ isdn_net_local * isdn_net_get_locked_lp(isdn_net_dev *nd) spin_lock_irqsave(&nd->queue_lock, flags); lp = nd->queue; /* get lp on top of queue */ - spin_lock(&nd->queue->xmit_lock); while (isdn_net_lp_busy(nd->queue)) { - spin_unlock(&nd->queue->xmit_lock); nd->queue = nd->queue->next; if (nd->queue == lp) { /* not found -- should never happen */ lp = NULL; goto errout; } - spin_lock(&nd->queue->xmit_lock); } lp = nd->queue; nd->queue = nd->queue->next; + spin_unlock_irqrestore(&nd->queue_lock, flags); + spin_lock(&lp->xmit_lock); local_bh_disable(); + return lp; errout: spin_unlock_irqrestore(&nd->queue_lock, flags); return lp; diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index b40fb9b6c86..6f308a4757e 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c @@ -405,7 +405,11 @@ static int __init via_pmu_start(void) printk(KERN_ERR "via-pmu: can't map interrupt\n"); return -ENODEV; } - if (request_irq(irq, via_pmu_interrupt, 0, "VIA-PMU", (void *)0)) { + /* We set IRQF_TIMER because we don't want the interrupt to be disabled + * between the 2 passes of driver suspend, we control our own disabling + * for that one + */ + if (request_irq(irq, via_pmu_interrupt, IRQF_TIMER, "VIA-PMU", (void 
*)0)) { printk(KERN_ERR "via-pmu: can't request irq %d\n", irq); return -ENODEV; } @@ -419,7 +423,7 @@ static int __init via_pmu_start(void) gpio_irq = irq_of_parse_and_map(gpio_node, 0); if (gpio_irq != NO_IRQ) { - if (request_irq(gpio_irq, gpio1_interrupt, 0, + if (request_irq(gpio_irq, gpio1_interrupt, IRQF_TIMER, "GPIO1 ADB", (void *)0)) printk(KERN_ERR "pmu: can't get irq %d" " (GPIO1)\n", gpio_irq); @@ -925,8 +929,7 @@ proc_write_options(struct file *file, const char __user *buffer, #ifdef CONFIG_ADB /* Send an ADB command */ -static int -pmu_send_request(struct adb_request *req, int sync) +static int pmu_send_request(struct adb_request *req, int sync) { int i, ret; @@ -1005,16 +1008,11 @@ pmu_send_request(struct adb_request *req, int sync) } /* Enable/disable autopolling */ -static int -pmu_adb_autopoll(int devs) +static int __pmu_adb_autopoll(int devs) { struct adb_request req; - if ((vias == NULL) || (!pmu_fully_inited) || !pmu_has_adb) - return -ENXIO; - if (devs) { - adb_dev_map = devs; pmu_request(&req, NULL, 5, PMU_ADB_CMD, 0, 0x86, adb_dev_map >> 8, adb_dev_map); pmu_adb_flags = 2; @@ -1027,9 +1025,17 @@ pmu_adb_autopoll(int devs) return 0; } +static int pmu_adb_autopoll(int devs) +{ + if ((vias == NULL) || (!pmu_fully_inited) || !pmu_has_adb) + return -ENXIO; + + adb_dev_map = devs; + return __pmu_adb_autopoll(devs); +} + /* Reset the ADB bus */ -static int -pmu_adb_reset_bus(void) +static int pmu_adb_reset_bus(void) { struct adb_request req; int save_autopoll = adb_dev_map; @@ -1038,13 +1044,13 @@ pmu_adb_reset_bus(void) return -ENXIO; /* anyone got a better idea?? */ - pmu_adb_autopoll(0); + __pmu_adb_autopoll(0); - req.nbytes = 5; + req.nbytes = 4; req.done = NULL; req.data[0] = PMU_ADB_CMD; - req.data[1] = 0; - req.data[2] = ADB_BUSRESET; + req.data[1] = ADB_BUSRESET; + req.data[2] = 0; req.data[3] = 0; req.data[4] = 0; req.reply_len = 0; @@ -1056,7 +1062,7 @@ pmu_adb_reset_bus(void) pmu_wait_complete(&req); if (save_autopoll != 0) - pmu_adb_autopoll(save_autopoll); + __pmu_adb_autopoll(save_autopoll); return 0; } diff --git a/drivers/md/Makefile b/drivers/md/Makefile index 1dc4185bd78..e355e7f6a53 100644 --- a/drivers/md/Makefile +++ b/drivers/md/Makefile @@ -46,7 +46,7 @@ obj-$(CONFIG_DM_LOG_USERSPACE) += dm-log-userspace.o obj-$(CONFIG_DM_ZERO) += dm-zero.o quiet_cmd_unroll = UNROLL $@ - cmd_unroll = $(PERL) $(srctree)/$(src)/unroll.pl $(UNROLL) \ + cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$(UNROLL) \ < $< > $@ || ( rm -f $@ && exit 1 ) ifeq ($(CONFIG_ALTIVEC),y) @@ -59,56 +59,56 @@ endif targets += raid6int1.c $(obj)/raid6int1.c: UNROLL := 1 -$(obj)/raid6int1.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE +$(obj)/raid6int1.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE $(call if_changed,unroll) targets += raid6int2.c $(obj)/raid6int2.c: UNROLL := 2 -$(obj)/raid6int2.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE +$(obj)/raid6int2.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE $(call if_changed,unroll) targets += raid6int4.c $(obj)/raid6int4.c: UNROLL := 4 -$(obj)/raid6int4.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE +$(obj)/raid6int4.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE $(call if_changed,unroll) targets += raid6int8.c $(obj)/raid6int8.c: UNROLL := 8 -$(obj)/raid6int8.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE +$(obj)/raid6int8.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE $(call if_changed,unroll) targets += raid6int16.c $(obj)/raid6int16.c: UNROLL := 16 -$(obj)/raid6int16.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE +$(obj)/raid6int16.c: 
$(src)/raid6int.uc $(src)/unroll.awk FORCE $(call if_changed,unroll) targets += raid6int32.c $(obj)/raid6int32.c: UNROLL := 32 -$(obj)/raid6int32.c: $(src)/raid6int.uc $(src)/unroll.pl FORCE +$(obj)/raid6int32.c: $(src)/raid6int.uc $(src)/unroll.awk FORCE $(call if_changed,unroll) CFLAGS_raid6altivec1.o += $(altivec_flags) targets += raid6altivec1.c $(obj)/raid6altivec1.c: UNROLL := 1 -$(obj)/raid6altivec1.c: $(src)/raid6altivec.uc $(src)/unroll.pl FORCE +$(obj)/raid6altivec1.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE $(call if_changed,unroll) CFLAGS_raid6altivec2.o += $(altivec_flags) targets += raid6altivec2.c $(obj)/raid6altivec2.c: UNROLL := 2 -$(obj)/raid6altivec2.c: $(src)/raid6altivec.uc $(src)/unroll.pl FORCE +$(obj)/raid6altivec2.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE $(call if_changed,unroll) CFLAGS_raid6altivec4.o += $(altivec_flags) targets += raid6altivec4.c $(obj)/raid6altivec4.c: UNROLL := 4 -$(obj)/raid6altivec4.c: $(src)/raid6altivec.uc $(src)/unroll.pl FORCE +$(obj)/raid6altivec4.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE $(call if_changed,unroll) CFLAGS_raid6altivec8.o += $(altivec_flags) targets += raid6altivec8.c $(obj)/raid6altivec8.c: UNROLL := 8 -$(obj)/raid6altivec8.c: $(src)/raid6altivec.uc $(src)/unroll.pl FORCE +$(obj)/raid6altivec8.c: $(src)/raid6altivec.uc $(src)/unroll.awk FORCE $(call if_changed,unroll) quiet_cmd_mktable = TABLE $@ diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 6986b0059d2..60e2b322db1 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -1624,10 +1624,11 @@ int bitmap_create(mddev_t *mddev) bitmap->offset = mddev->bitmap_offset; if (file) { get_file(file); - do_sync_mapping_range(file->f_mapping, 0, LLONG_MAX, - SYNC_FILE_RANGE_WAIT_BEFORE | - SYNC_FILE_RANGE_WRITE | - SYNC_FILE_RANGE_WAIT_AFTER); + /* As future accesses to this file will use bmap, + * and bypass the page cache, we must sync the file + * first. + */ + vfs_fsync(file, file->f_dentry, 1); } /* read superblock from bitmap file (this sets bitmap->chunksize) */ err = bitmap_read_sb(bitmap); diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c index 556acff3952..7dbe652efb5 100644 --- a/drivers/md/dm-exception-store.c +++ b/drivers/md/dm-exception-store.c @@ -138,16 +138,6 @@ int dm_exception_store_type_unregister(struct dm_exception_store_type *type) } EXPORT_SYMBOL(dm_exception_store_type_unregister); -/* - * Round a number up to the nearest 'size' boundary. size must - * be a power of 2. - */ -static ulong round_up(ulong n, ulong size) -{ - size--; - return (n + size) & ~size; -} - static int set_chunk_size(struct dm_exception_store *store, const char *chunk_size_arg, char **error) { @@ -155,7 +145,8 @@ static int set_chunk_size(struct dm_exception_store *store, char *value; chunk_size_ulong = simple_strtoul(chunk_size_arg, &value, 10); - if (*chunk_size_arg == '\0' || *value != '\0') { + if (*chunk_size_arg == '\0' || *value != '\0' || + chunk_size_ulong > UINT_MAX) { *error = "Invalid chunk size"; return -EINVAL; } @@ -165,40 +156,35 @@ static int set_chunk_size(struct dm_exception_store *store, return 0; } - /* - * Chunk size must be multiple of page size. Silently - * round up if it's not. 
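In the dm-exception-store hunks above, the silent round-up of the chunk size to a page multiple is dropped; the value is instead parsed, bounded so it fits in an unsigned int, and then validated. A hedged userspace rendering of the checks dm_exception_store_set_chunk_size() performs (sector and block-size handling simplified; names outside the error strings are mine):

#include <limits.h>
#include <stdio.h>

#define SECTOR_SHIFT 9

static int is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

/* chunk_size is in 512-byte sectors, logical_block_size in bytes */
static const char *check_chunk_size(unsigned chunk_size,
				    unsigned logical_block_size)
{
	if (!is_power_of_2(chunk_size))
		return "Chunk size is not a power of 2";
	if (chunk_size % (logical_block_size >> SECTOR_SHIFT))
		return "Chunk size is not a multiple of device blocksize";
	if (chunk_size > INT_MAX >> SECTOR_SHIFT)
		return "Chunk size is too high";
	return NULL;	/* accepted: store chunk_size, chunk_mask, chunk_shift */
}

int main(void)
{
	const char *err = check_chunk_size(16, 4096);	/* 16 sectors on a 4K-block device */

	printf("%s\n", err ? err : "chunk size accepted");
	return 0;
}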
- */ - chunk_size_ulong = round_up(chunk_size_ulong, PAGE_SIZE >> 9); - - return dm_exception_store_set_chunk_size(store, chunk_size_ulong, + return dm_exception_store_set_chunk_size(store, + (unsigned) chunk_size_ulong, error); } int dm_exception_store_set_chunk_size(struct dm_exception_store *store, - unsigned long chunk_size_ulong, + unsigned chunk_size, char **error) { /* Check chunk_size is a power of 2 */ - if (!is_power_of_2(chunk_size_ulong)) { + if (!is_power_of_2(chunk_size)) { *error = "Chunk size is not a power of 2"; return -EINVAL; } /* Validate the chunk size against the device block size */ - if (chunk_size_ulong % (bdev_logical_block_size(store->cow->bdev) >> 9)) { + if (chunk_size % (bdev_logical_block_size(store->cow->bdev) >> 9)) { *error = "Chunk size is not a multiple of device blocksize"; return -EINVAL; } - if (chunk_size_ulong > INT_MAX >> SECTOR_SHIFT) { + if (chunk_size > INT_MAX >> SECTOR_SHIFT) { *error = "Chunk size is too high"; return -EINVAL; } - store->chunk_size = chunk_size_ulong; - store->chunk_mask = chunk_size_ulong - 1; - store->chunk_shift = ffs(chunk_size_ulong) - 1; + store->chunk_size = chunk_size; + store->chunk_mask = chunk_size - 1; + store->chunk_shift = ffs(chunk_size) - 1; return 0; } @@ -251,7 +237,7 @@ int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, r = set_chunk_size(tmp_store, argv[2], &ti->error); if (r) - goto bad_cow; + goto bad_ctr; r = type->ctr(tmp_store, 0, NULL); if (r) { diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h index 812c71872ba..8a223a48802 100644 --- a/drivers/md/dm-exception-store.h +++ b/drivers/md/dm-exception-store.h @@ -101,9 +101,9 @@ struct dm_exception_store { struct dm_dev *cow; /* Size of data blocks saved - must be a power of 2 */ - chunk_t chunk_size; - chunk_t chunk_mask; - chunk_t chunk_shift; + unsigned chunk_size; + unsigned chunk_mask; + unsigned chunk_shift; void *context; }; @@ -169,7 +169,7 @@ int dm_exception_store_type_register(struct dm_exception_store_type *type); int dm_exception_store_type_unregister(struct dm_exception_store_type *type); int dm_exception_store_set_chunk_size(struct dm_exception_store *store, - unsigned long chunk_size_ulong, + unsigned chunk_size, char **error); int dm_exception_store_create(struct dm_target *ti, int argc, char **argv, diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c index 652bd33109e..7ac2c1450d1 100644 --- a/drivers/md/dm-log-userspace-base.c +++ b/drivers/md/dm-log-userspace-base.c @@ -156,7 +156,7 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti, } /* The ptr value is sufficient for local unique id */ - lc->luid = (uint64_t)lc; + lc->luid = (unsigned long)lc; lc->ti = ti; diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index d5b2e08750d..0c746420c00 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -284,12 +284,13 @@ static int read_header(struct pstore *ps, int *new_snapshot) { int r; struct disk_header *dh; - chunk_t chunk_size; + unsigned chunk_size; int chunk_size_supplied = 1; char *chunk_err; /* - * Use default chunk size (or hardsect_size, if larger) if none supplied + * Use default chunk size (or logical_block_size, if larger) + * if none supplied */ if (!ps->store->chunk_size) { ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS, @@ -334,10 +335,9 @@ static int read_header(struct pstore *ps, int *new_snapshot) return 0; if (chunk_size_supplied) - 
DMWARN("chunk size %llu in device metadata overrides " - "table chunk size of %llu.", - (unsigned long long)chunk_size, - (unsigned long long)ps->store->chunk_size); + DMWARN("chunk size %u in device metadata overrides " + "table chunk size of %u.", + chunk_size, ps->store->chunk_size); /* We had a bogus chunk_size. Fix stuff up. */ free_area(ps); @@ -345,8 +345,8 @@ static int read_header(struct pstore *ps, int *new_snapshot) r = dm_exception_store_set_chunk_size(ps->store, chunk_size, &chunk_err); if (r) { - DMERR("invalid on-disk chunk size %llu: %s.", - (unsigned long long)chunk_size, chunk_err); + DMERR("invalid on-disk chunk size %u: %s.", + chunk_size, chunk_err); return r; } diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 57f1bf7f3b7..3a3ba46e6d4 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -296,6 +296,7 @@ static void __insert_origin(struct origin *o) */ static int register_snapshot(struct dm_snapshot *snap) { + struct dm_snapshot *l; struct origin *o, *new_o; struct block_device *bdev = snap->origin->bdev; @@ -319,7 +320,11 @@ static int register_snapshot(struct dm_snapshot *snap) __insert_origin(o); } - list_add_tail(&snap->list, &o->snapshots); + /* Sort the list according to chunk size, largest-first smallest-last */ + list_for_each_entry(l, &o->snapshots, list) + if (l->store->chunk_size < snap->store->chunk_size) + break; + list_add_tail(&snap->list, &l->list); up_write(&_origins_lock); return 0; @@ -668,6 +673,11 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) bio_list_init(&s->queued_bios); INIT_WORK(&s->queued_bios_work, flush_queued_bios); + if (!s->store->chunk_size) { + ti->error = "Chunk size not set"; + goto bad_load_and_register; + } + /* Add snapshot to the list of snapshots for this origin */ /* Exceptions aren't triggered till snapshot_resume() is called */ if (register_snapshot(s)) { @@ -951,7 +961,7 @@ static void start_copy(struct dm_snap_pending_exception *pe) src.bdev = bdev; src.sector = chunk_to_sector(s->store, pe->e.old_chunk); - src.count = min(s->store->chunk_size, dev_size - src.sector); + src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector); dest.bdev = s->store->cow->bdev; dest.sector = chunk_to_sector(s->store, pe->e.new_chunk); @@ -1142,6 +1152,8 @@ static int snapshot_status(struct dm_target *ti, status_type_t type, unsigned sz = 0; struct dm_snapshot *snap = ti->private; + down_write(&snap->lock); + switch (type) { case STATUSTYPE_INFO: if (!snap->valid) @@ -1173,6 +1185,8 @@ static int snapshot_status(struct dm_target *ti, status_type_t type, break; } + up_write(&snap->lock); + return 0; } @@ -1388,7 +1402,7 @@ static void origin_resume(struct dm_target *ti) struct dm_dev *dev = ti->private; struct dm_snapshot *snap; struct origin *o; - chunk_t chunk_size = 0; + unsigned chunk_size = 0; down_read(&_origins_lock); o = __lookup_origin(dev->bdev); @@ -1465,7 +1479,7 @@ static int __init dm_snapshot_init(void) r = dm_register_target(&snapshot_target); if (r) { DMERR("snapshot target register failed %d", r); - return r; + goto bad_register_snapshot_target; } r = dm_register_target(&origin_target); @@ -1522,6 +1536,9 @@ bad2: dm_unregister_target(&origin_target); bad1: dm_unregister_target(&snapshot_target); + +bad_register_snapshot_target: + dm_exception_store_exit(); return r; } diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 376f1ab48a2..724efc63904 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -47,6 +47,7 @@ struct dm_io { atomic_t io_count; struct 
bio *bio; unsigned long start_time; + spinlock_t endio_lock; }; /* @@ -578,8 +579,12 @@ static void dec_pending(struct dm_io *io, int error) struct mapped_device *md = io->md; /* Push-back supersedes any I/O errors */ - if (error && !(io->error > 0 && __noflush_suspending(md))) - io->error = error; + if (unlikely(error)) { + spin_lock_irqsave(&io->endio_lock, flags); + if (!(io->error > 0 && __noflush_suspending(md))) + io->error = error; + spin_unlock_irqrestore(&io->endio_lock, flags); + } if (atomic_dec_and_test(&io->io_count)) { if (io->error == DM_ENDIO_REQUEUE) { @@ -1226,6 +1231,7 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio) atomic_set(&ci.io->io_count, 1); ci.io->bio = bio; ci.io->md = md; + spin_lock_init(&ci.io->endio_lock); ci.sector = bio->bi_sector; ci.sector_count = bio_sectors(bio); if (unlikely(bio_empty_barrier(bio))) @@ -1822,6 +1828,7 @@ static struct mapped_device *alloc_dev(int minor) bad_bdev: destroy_workqueue(md->wq); bad_thread: + del_gendisk(md->disk); put_disk(md->disk); bad_disk: blk_cleanup_queue(md->queue); diff --git a/drivers/md/md.c b/drivers/md/md.c index 26ba42a7912..10eb1fce975 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -2631,7 +2631,7 @@ static void analyze_sbs(mddev_t * mddev) rdev->desc_nr = i++; rdev->raid_disk = rdev->desc_nr; set_bit(In_sync, &rdev->flags); - } else if (rdev->raid_disk >= mddev->raid_disks) { + } else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) { rdev->raid_disk = -1; clear_bit(In_sync, &rdev->flags); } diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index d1b9bd5fd4f..a053423785c 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -64,7 +64,7 @@ static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) /* allocate a r1bio with room for raid_disks entries in the bios array */ r1_bio = kzalloc(size, gfp_flags); - if (!r1_bio) + if (!r1_bio && pi->mddev) unplug_slaves(pi->mddev); return r1_bio; @@ -1683,6 +1683,7 @@ static void raid1d(mddev_t *mddev) generic_make_request(bio); } } + cond_resched(); } if (unplug) unplug_slaves(mddev); @@ -1978,13 +1979,14 @@ static int run(mddev_t *mddev) conf->poolinfo = kmalloc(sizeof(*conf->poolinfo), GFP_KERNEL); if (!conf->poolinfo) goto out_no_mem; - conf->poolinfo->mddev = mddev; + conf->poolinfo->mddev = NULL; conf->poolinfo->raid_disks = mddev->raid_disks; conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc, r1bio_pool_free, conf->poolinfo); if (!conf->r1bio_pool) goto out_no_mem; + conf->poolinfo->mddev = mddev; spin_lock_init(&conf->device_lock); mddev->queue->queue_lock = &conf->device_lock; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 51c4c5c4d87..c2cb7b87b44 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -68,7 +68,7 @@ static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data) /* allocate a r10bio with room for raid_disks entries in the bios array */ r10_bio = kzalloc(size, gfp_flags); - if (!r10_bio) + if (!r10_bio && conf->mddev) unplug_slaves(conf->mddev); return r10_bio; @@ -1632,6 +1632,7 @@ static void raid10d(mddev_t *mddev) generic_make_request(bio); } } + cond_resched(); } if (unplug) unplug_slaves(mddev); @@ -2095,7 +2096,6 @@ static int run(mddev_t *mddev) if (!conf->tmppage) goto out_free_conf; - conf->mddev = mddev; conf->raid_disks = mddev->raid_disks; conf->near_copies = nc; conf->far_copies = fc; @@ -2132,6 +2132,7 @@ static int run(mddev_t *mddev) goto out_free_conf; } + conf->mddev = mddev; spin_lock_init(&conf->device_lock); 
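The raid1 and raid10 changes above fix an initialisation-order problem: the mempool allocation callbacks (r1bio_pool_alloc / r10bio_pool_alloc) can run while the pool is first being filled, so they now only call unplug_slaves() once the mddev back-pointer is set, and conf/poolinfo gains that pointer only after mempool_create() returns. A rough userspace sketch of the idea, with invented names:

#include <stdio.h>
#include <stdlib.h>

struct mddev {
	const char *name;
};

struct poolinfo {
	struct mddev *mddev;		/* NULL until setup is complete */
};

static void *rbio_alloc(void *data)
{
	struct poolinfo *pi = data;
	void *rbio = calloc(1, 64);

	if (!rbio && pi->mddev)		/* only touch the owner once published */
		fprintf(stderr, "would unplug slaves of %s\n", pi->mddev->name);
	return rbio;
}

int main(void)
{
	struct mddev md = { "md0" };
	struct poolinfo pi = { NULL };

	void *first = rbio_alloc(&pi);	/* pre-filling the pool is now safe */
	pi.mddev = &md;			/* publish the owner afterwards */

	free(first);
	return 0;
}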
mddev->queue->queue_lock = &conf->device_lock; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 94829804ab7..81abefc172d 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -156,13 +156,16 @@ static inline int raid6_next_disk(int disk, int raid_disks) static int raid6_idx_to_slot(int idx, struct stripe_head *sh, int *count, int syndrome_disks) { - int slot; + int slot = *count; + if (sh->ddf_layout) + (*count)++; if (idx == sh->pd_idx) return syndrome_disks; if (idx == sh->qd_idx) return syndrome_disks + 1; - slot = (*count)++; + if (!sh->ddf_layout) + (*count)++; return slot; } @@ -717,7 +720,7 @@ static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh) int i; for (i = 0; i < disks; i++) - srcs[i] = (void *)raid6_empty_zero_page; + srcs[i] = NULL; count = 0; i = d0_idx; @@ -727,9 +730,8 @@ static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh) srcs[slot] = sh->dev[i].page; i = raid6_next_disk(i, disks); } while (i != d0_idx); - BUG_ON(count != syndrome_disks); - return count; + return syndrome_disks; } static struct dma_async_tx_descriptor * @@ -814,7 +816,7 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) * slot number conversion for 'faila' and 'failb' */ for (i = 0; i < disks ; i++) - blocks[i] = (void *)raid6_empty_zero_page; + blocks[i] = NULL; count = 0; i = d0_idx; do { @@ -828,7 +830,6 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) failb = slot; i = raid6_next_disk(i, disks); } while (i != d0_idx); - BUG_ON(count != syndrome_disks); BUG_ON(faila == failb); if (failb < faila) @@ -845,7 +846,7 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) init_async_submit(&submit, ASYNC_TX_FENCE, NULL, ops_complete_compute, sh, to_addr_conv(sh, percpu)); - return async_gen_syndrome(blocks, 0, count+2, + return async_gen_syndrome(blocks, 0, syndrome_disks+2, STRIPE_SIZE, &submit); } else { struct page *dest; @@ -1139,7 +1140,7 @@ static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu &sh->ops.zero_sum_result, percpu->spare_page, &submit); } -static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) +static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request) { int overlap_clear = 0, i, disks = sh->disks; struct dma_async_tx_descriptor *tx = NULL; @@ -1204,22 +1205,55 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) put_cpu(); } +#ifdef CONFIG_MULTICORE_RAID456 +static void async_run_ops(void *param, async_cookie_t cookie) +{ + struct stripe_head *sh = param; + unsigned long ops_request = sh->ops.request; + + clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state); + wake_up(&sh->ops.wait_for_ops); + + __raid_run_ops(sh, ops_request); + release_stripe(sh); +} + +static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) +{ + /* since handle_stripe can be called outside of raid5d context + * we need to ensure sh->ops.request is de-staged before another + * request arrives + */ + wait_event(sh->ops.wait_for_ops, + !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state)); + sh->ops.request = ops_request; + + atomic_inc(&sh->count); + async_schedule(async_run_ops, sh); +} +#else +#define raid_run_ops __raid_run_ops +#endif + static int grow_one_stripe(raid5_conf_t *conf) { struct stripe_head *sh; + int disks = max(conf->raid_disks, conf->previous_raid_disks); sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL); if (!sh) return 0; - memset(sh, 0, sizeof(*sh) + 
(conf->raid_disks-1)*sizeof(struct r5dev)); + memset(sh, 0, sizeof(*sh) + (disks-1)*sizeof(struct r5dev)); sh->raid_conf = conf; spin_lock_init(&sh->lock); + #ifdef CONFIG_MULTICORE_RAID456 + init_waitqueue_head(&sh->ops.wait_for_ops); + #endif - if (grow_buffers(sh, conf->raid_disks)) { - shrink_buffers(sh, conf->raid_disks); + if (grow_buffers(sh, disks)) { + shrink_buffers(sh, disks); kmem_cache_free(conf->slab_cache, sh); return 0; } - sh->disks = conf->raid_disks; /* we just created an active stripe so... */ atomic_set(&sh->count, 1); atomic_inc(&conf->active_stripes); @@ -1231,7 +1265,7 @@ static int grow_one_stripe(raid5_conf_t *conf) static int grow_stripes(raid5_conf_t *conf, int num) { struct kmem_cache *sc; - int devs = conf->raid_disks; + int devs = max(conf->raid_disks, conf->previous_raid_disks); sprintf(conf->cache_name[0], "raid%d-%s", conf->level, mdname(conf->mddev)); @@ -1329,6 +1363,9 @@ static int resize_stripes(raid5_conf_t *conf, int newsize) nsh->raid_conf = conf; spin_lock_init(&nsh->lock); + #ifdef CONFIG_MULTICORE_RAID456 + init_waitqueue_head(&nsh->ops.wait_for_ops); + #endif list_add(&nsh->lru, &newstripes); } @@ -1899,10 +1936,15 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) case ALGORITHM_PARITY_N: break; case ALGORITHM_ROTATING_N_CONTINUE: + /* Like left_symmetric, but P is before Q */ if (sh->pd_idx == 0) i--; /* P D D D Q */ - else if (i > sh->pd_idx) - i -= 2; /* D D Q P D */ + else { + /* D D Q P D */ + if (i < sh->pd_idx) + i += raid_disks; + i -= (sh->pd_idx + 1); + } break; case ALGORITHM_LEFT_ASYMMETRIC_6: case ALGORITHM_RIGHT_ASYMMETRIC_6: @@ -2896,7 +2938,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh, * */ -static bool handle_stripe5(struct stripe_head *sh) +static void handle_stripe5(struct stripe_head *sh) { raid5_conf_t *conf = sh->raid_conf; int disks = sh->disks, i; @@ -3167,11 +3209,9 @@ static bool handle_stripe5(struct stripe_head *sh) ops_run_io(sh, &s); return_io(return_bi); - - return blocked_rdev == NULL; } -static bool handle_stripe6(struct stripe_head *sh) +static void handle_stripe6(struct stripe_head *sh) { raid5_conf_t *conf = sh->raid_conf; int disks = sh->disks; @@ -3455,17 +3495,14 @@ static bool handle_stripe6(struct stripe_head *sh) ops_run_io(sh, &s); return_io(return_bi); - - return blocked_rdev == NULL; } -/* returns true if the stripe was handled */ -static bool handle_stripe(struct stripe_head *sh) +static void handle_stripe(struct stripe_head *sh) { if (sh->raid_conf->level == 6) - return handle_stripe6(sh); + handle_stripe6(sh); else - return handle_stripe5(sh); + handle_stripe5(sh); } static void raid5_activate_delayed(raid5_conf_t *conf) @@ -3503,9 +3540,10 @@ static void unplug_slaves(mddev_t *mddev) { raid5_conf_t *conf = mddev->private; int i; + int devs = max(conf->raid_disks, conf->previous_raid_disks); rcu_read_lock(); - for (i = 0; i < conf->raid_disks; i++) { + for (i = 0; i < devs; i++) { mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { struct request_queue *r_queue = bdev_get_queue(rdev->bdev); @@ -4277,9 +4315,7 @@ static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski clear_bit(STRIPE_INSYNC, &sh->state); spin_unlock(&sh->lock); - /* wait for any blocked device to be handled */ - while (unlikely(!handle_stripe(sh))) - ; + handle_stripe(sh); release_stripe(sh); return STRIPE_SECTORS; @@ -4349,37 +4385,6 @@ static int 
retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio) return handled; } -#ifdef CONFIG_MULTICORE_RAID456 -static void __process_stripe(void *param, async_cookie_t cookie) -{ - struct stripe_head *sh = param; - - handle_stripe(sh); - release_stripe(sh); -} - -static void process_stripe(struct stripe_head *sh, struct list_head *domain) -{ - async_schedule_domain(__process_stripe, sh, domain); -} - -static void synchronize_stripe_processing(struct list_head *domain) -{ - async_synchronize_full_domain(domain); -} -#else -static void process_stripe(struct stripe_head *sh, struct list_head *domain) -{ - handle_stripe(sh); - release_stripe(sh); - cond_resched(); -} - -static void synchronize_stripe_processing(struct list_head *domain) -{ -} -#endif - /* * This is our raid5 kernel thread. @@ -4393,7 +4398,6 @@ static void raid5d(mddev_t *mddev) struct stripe_head *sh; raid5_conf_t *conf = mddev->private; int handled; - LIST_HEAD(raid_domain); pr_debug("+++ raid5d active\n"); @@ -4430,7 +4434,9 @@ static void raid5d(mddev_t *mddev) spin_unlock_irq(&conf->device_lock); handled++; - process_stripe(sh, &raid_domain); + handle_stripe(sh); + release_stripe(sh); + cond_resched(); spin_lock_irq(&conf->device_lock); } @@ -4438,7 +4444,6 @@ static void raid5d(mddev_t *mddev) spin_unlock_irq(&conf->device_lock); - synchronize_stripe_processing(&raid_domain); async_tx_issue_pending_all(); unplug_slaves(mddev); @@ -4558,13 +4563,9 @@ raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks) if (!sectors) sectors = mddev->dev_sectors; - if (!raid_disks) { + if (!raid_disks) /* size is defined by the smallest of previous and new size */ - if (conf->raid_disks < conf->previous_raid_disks) - raid_disks = conf->raid_disks; - else - raid_disks = conf->previous_raid_disks; - } + raid_disks = min(conf->raid_disks, conf->previous_raid_disks); sectors &= ~((sector_t)mddev->chunk_sectors - 1); sectors &= ~((sector_t)mddev->new_chunk_sectors - 1); @@ -4665,7 +4666,7 @@ static int raid5_alloc_percpu(raid5_conf_t *conf) } per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page; } - scribble = kmalloc(scribble_len(conf->raid_disks), GFP_KERNEL); + scribble = kmalloc(conf->scribble_len, GFP_KERNEL); if (!scribble) { err = -ENOMEM; break; @@ -4686,7 +4687,7 @@ static int raid5_alloc_percpu(raid5_conf_t *conf) static raid5_conf_t *setup_conf(mddev_t *mddev) { raid5_conf_t *conf; - int raid_disk, memory; + int raid_disk, memory, max_disks; mdk_rdev_t *rdev; struct disk_info *disk; @@ -4722,15 +4723,28 @@ static raid5_conf_t *setup_conf(mddev_t *mddev) conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL); if (conf == NULL) goto abort; + spin_lock_init(&conf->device_lock); + init_waitqueue_head(&conf->wait_for_stripe); + init_waitqueue_head(&conf->wait_for_overlap); + INIT_LIST_HEAD(&conf->handle_list); + INIT_LIST_HEAD(&conf->hold_list); + INIT_LIST_HEAD(&conf->delayed_list); + INIT_LIST_HEAD(&conf->bitmap_list); + INIT_LIST_HEAD(&conf->inactive_list); + atomic_set(&conf->active_stripes, 0); + atomic_set(&conf->preread_active_stripes, 0); + atomic_set(&conf->active_aligned_reads, 0); + conf->bypass_threshold = BYPASS_THRESHOLD; conf->raid_disks = mddev->raid_disks; - conf->scribble_len = scribble_len(conf->raid_disks); if (mddev->reshape_position == MaxSector) conf->previous_raid_disks = mddev->raid_disks; else conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; + max_disks = max(conf->raid_disks, conf->previous_raid_disks); + conf->scribble_len = scribble_len(max_disks); - conf->disks = 
kzalloc(conf->raid_disks * sizeof(struct disk_info), + conf->disks = kzalloc(max_disks * sizeof(struct disk_info), GFP_KERNEL); if (!conf->disks) goto abort; @@ -4744,24 +4758,11 @@ static raid5_conf_t *setup_conf(mddev_t *mddev) if (raid5_alloc_percpu(conf) != 0) goto abort; - spin_lock_init(&conf->device_lock); - init_waitqueue_head(&conf->wait_for_stripe); - init_waitqueue_head(&conf->wait_for_overlap); - INIT_LIST_HEAD(&conf->handle_list); - INIT_LIST_HEAD(&conf->hold_list); - INIT_LIST_HEAD(&conf->delayed_list); - INIT_LIST_HEAD(&conf->bitmap_list); - INIT_LIST_HEAD(&conf->inactive_list); - atomic_set(&conf->active_stripes, 0); - atomic_set(&conf->preread_active_stripes, 0); - atomic_set(&conf->active_aligned_reads, 0); - conf->bypass_threshold = BYPASS_THRESHOLD; - pr_debug("raid5: run(%s) called.\n", mdname(mddev)); list_for_each_entry(rdev, &mddev->disks, same_set) { raid_disk = rdev->raid_disk; - if (raid_disk >= conf->raid_disks + if (raid_disk >= max_disks || raid_disk < 0) continue; disk = conf->disks + raid_disk; @@ -4793,7 +4794,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev) } memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + - conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; + max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; if (grow_stripes(conf, conf->max_nr_stripes)) { printk(KERN_ERR "raid5: couldn't allocate %dkB for buffers\n", memory); @@ -4918,7 +4919,8 @@ static int run(mddev_t *mddev) test_bit(In_sync, &rdev->flags)) working_disks++; - mddev->degraded = conf->raid_disks - working_disks; + mddev->degraded = (max(conf->raid_disks, conf->previous_raid_disks) + - working_disks); if (mddev->degraded > conf->max_degraded) { printk(KERN_ERR "raid5: not enough operational devices for %s" diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 2390e0e83da..dd708359b45 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -214,12 +214,20 @@ struct stripe_head { int disks; /* disks in stripe */ enum check_states check_state; enum reconstruct_states reconstruct_state; - /* stripe_operations + /** + * struct stripe_operations * @target - STRIPE_OP_COMPUTE_BLK target + * @target2 - 2nd compute target in the raid6 case + * @zero_sum_result - P and Q verification flags + * @request - async service request flags for raid_run_ops */ struct stripe_operations { int target, target2; enum sum_check_flags zero_sum_result; + #ifdef CONFIG_MULTICORE_RAID456 + unsigned long request; + wait_queue_head_t wait_for_ops; + #endif } ops; struct r5dev { struct bio req; @@ -294,6 +302,8 @@ struct r6_state { #define STRIPE_FULL_WRITE 13 /* all blocks are set to be overwritten */ #define STRIPE_BIOFILL_RUN 14 #define STRIPE_COMPUTE_RUN 15 +#define STRIPE_OPS_REQ_PENDING 16 + /* * Operation request flags */ @@ -478,7 +488,7 @@ static inline int algorithm_valid_raid6(int layout) { return (layout >= 0 && layout <= 5) || - (layout == 8 || layout == 10) + (layout >= 8 && layout <= 10) || (layout >= 16 && layout <= 20); } diff --git a/drivers/md/raid6altivec.uc b/drivers/md/raid6altivec.uc index 699dfeee494..2654d5c854b 100644 --- a/drivers/md/raid6altivec.uc +++ b/drivers/md/raid6altivec.uc @@ -15,7 +15,7 @@ * * $#-way unrolled portable integer math RAID-6 instruction set * - * This file is postprocessed using unroll.pl + * This file is postprocessed using unroll.awk * * <benh> hpa: in process, * you can just "steal" the vec unit with enable_kernel_altivec() (but diff --git a/drivers/md/raid6int.uc b/drivers/md/raid6int.uc index f9bf9cba357..d1e276a14fa 
100644 --- a/drivers/md/raid6int.uc +++ b/drivers/md/raid6int.uc @@ -15,7 +15,7 @@ * * $#-way unrolled portable integer math RAID-6 instruction set * - * This file is postprocessed using unroll.pl + * This file is postprocessed using unroll.awk */ #include <linux/raid/pq.h> diff --git a/drivers/md/raid6test/Makefile b/drivers/md/raid6test/Makefile index 58ffdf4f516..2874cbef529 100644 --- a/drivers/md/raid6test/Makefile +++ b/drivers/md/raid6test/Makefile @@ -7,7 +7,7 @@ CC = gcc OPTFLAGS = -O2 # Adjust as desired CFLAGS = -I.. -I ../../../include -g $(OPTFLAGS) LD = ld -PERL = perl +AWK = awk AR = ar RANLIB = ranlib @@ -35,35 +35,35 @@ raid6.a: raid6int1.o raid6int2.o raid6int4.o raid6int8.o raid6int16.o \ raid6test: test.c raid6.a $(CC) $(CFLAGS) -o raid6test $^ -raid6altivec1.c: raid6altivec.uc ../unroll.pl - $(PERL) ../unroll.pl 1 < raid6altivec.uc > $@ +raid6altivec1.c: raid6altivec.uc ../unroll.awk + $(AWK) ../unroll.awk -vN=1 < raid6altivec.uc > $@ -raid6altivec2.c: raid6altivec.uc ../unroll.pl - $(PERL) ../unroll.pl 2 < raid6altivec.uc > $@ +raid6altivec2.c: raid6altivec.uc ../unroll.awk + $(AWK) ../unroll.awk -vN=2 < raid6altivec.uc > $@ -raid6altivec4.c: raid6altivec.uc ../unroll.pl - $(PERL) ../unroll.pl 4 < raid6altivec.uc > $@ +raid6altivec4.c: raid6altivec.uc ../unroll.awk + $(AWK) ../unroll.awk -vN=4 < raid6altivec.uc > $@ -raid6altivec8.c: raid6altivec.uc ../unroll.pl - $(PERL) ../unroll.pl 8 < raid6altivec.uc > $@ +raid6altivec8.c: raid6altivec.uc ../unroll.awk + $(AWK) ../unroll.awk -vN=8 < raid6altivec.uc > $@ -raid6int1.c: raid6int.uc ../unroll.pl - $(PERL) ../unroll.pl 1 < raid6int.uc > $@ +raid6int1.c: raid6int.uc ../unroll.awk + $(AWK) ../unroll.awk -vN=1 < raid6int.uc > $@ -raid6int2.c: raid6int.uc ../unroll.pl - $(PERL) ../unroll.pl 2 < raid6int.uc > $@ +raid6int2.c: raid6int.uc ../unroll.awk + $(AWK) ../unroll.awk -vN=2 < raid6int.uc > $@ -raid6int4.c: raid6int.uc ../unroll.pl - $(PERL) ../unroll.pl 4 < raid6int.uc > $@ +raid6int4.c: raid6int.uc ../unroll.awk + $(AWK) ../unroll.awk -vN=4 < raid6int.uc > $@ -raid6int8.c: raid6int.uc ../unroll.pl - $(PERL) ../unroll.pl 8 < raid6int.uc > $@ +raid6int8.c: raid6int.uc ../unroll.awk + $(AWK) ../unroll.awk -vN=8 < raid6int.uc > $@ -raid6int16.c: raid6int.uc ../unroll.pl - $(PERL) ../unroll.pl 16 < raid6int.uc > $@ +raid6int16.c: raid6int.uc ../unroll.awk + $(AWK) ../unroll.awk -vN=16 < raid6int.uc > $@ -raid6int32.c: raid6int.uc ../unroll.pl - $(PERL) ../unroll.pl 32 < raid6int.uc > $@ +raid6int32.c: raid6int.uc ../unroll.awk + $(AWK) ../unroll.awk -vN=32 < raid6int.uc > $@ raid6tables.c: mktables ./mktables > raid6tables.c diff --git a/drivers/md/unroll.awk b/drivers/md/unroll.awk new file mode 100644 index 00000000000..c6aa03631df --- /dev/null +++ b/drivers/md/unroll.awk @@ -0,0 +1,20 @@ + +# This filter requires one command line option of form -vN=n +# where n must be a decimal number. +# +# Repeat each input line containing $$ n times, replacing $$ with 0...n-1. +# Replace each $# with n, and each $* with a single $. 
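The awk script that starts above and continues below reproduces the old Perl unroller: every input line containing $$ is emitted N times with $$ replaced by 0..N-1, $# is replaced by N, and $* by a literal $; it is invoked as awk -f unroll.awk -vN=<n>, as the drivers/md/Makefile hunk shows. As a purely hypothetical illustration (this line is not taken from the real .uc sources), running it with -vN=2 over

	wq$$ = q[d+$$*NSIZE];

would produce

	wq0 = q[d+0*NSIZE];
	wq1 = q[d+1*NSIZE];

while lines without $$ pass through once, unchanged.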
+ +BEGIN { + n = N + 0 +} +{ + if (/\$\$/) { rep = n } else { rep = 1 } + for (i = 0; i < rep; ++i) { + tmp = $0 + gsub(/\$\$/, i, tmp) + gsub(/\$\#/, n, tmp) + gsub(/\$\*/, "$", tmp) + print tmp + } +} diff --git a/drivers/md/unroll.pl b/drivers/md/unroll.pl deleted file mode 100644 index 3acc710a20e..00000000000 --- a/drivers/md/unroll.pl +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/perl -# -# Take a piece of C code and for each line which contains the sequence $$ -# repeat n times with $ replaced by 0...n-1; the sequence $# is replaced -# by the unrolling factor, and $* with a single $ -# - -($n) = @ARGV; -$n += 0; - -while ( defined($line = <STDIN>) ) { - if ( $line =~ /\$\$/ ) { - $rep = $n; - } else { - $rep = 1; - } - for ( $i = 0 ; $i < $rep ; $i++ ) { - $tmp = $line; - $tmp =~ s/\$\$/$i/g; - $tmp =~ s/\$\#/$n/g; - $tmp =~ s/\$\*/\$/g; - print $tmp; - } -} diff --git a/drivers/mfd/twl4030-core.c b/drivers/mfd/twl4030-core.c index e832e975da6..a1c47ee95c0 100644 --- a/drivers/mfd/twl4030-core.c +++ b/drivers/mfd/twl4030-core.c @@ -795,7 +795,7 @@ twl4030_probe(struct i2c_client *client, const struct i2c_device_id *id) twl->client = i2c_new_dummy(client->adapter, twl->address); if (!twl->client) { - dev_err(&twl->client->dev, + dev_err(&client->dev, "can't attach client %d\n", i); status = -ENOMEM; goto fail; diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c index d3015dfb913..ac056ea6b66 100644 --- a/drivers/mfd/wm831x-irq.c +++ b/drivers/mfd/wm831x-irq.c @@ -507,6 +507,8 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq) { int i, ret; + mutex_init(&wm831x->irq_lock); + if (!irq) { dev_warn(wm831x->dev, "No interrupt specified - functionality limited\n"); @@ -521,7 +523,6 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq) } wm831x->irq = irq; - mutex_init(&wm831x->irq_lock); INIT_WORK(&wm831x->irq_work, wm831x_irq_worker); /* Mask the individual interrupt sources */ diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c index e556d42cc45..63924e0c7ea 100644 --- a/drivers/mmc/host/at91_mci.c +++ b/drivers/mmc/host/at91_mci.c @@ -72,7 +72,6 @@ #include <asm/irq.h> #include <asm/gpio.h> -#include <asm/mach/mmc.h> #include <mach/board.h> #include <mach/cpu.h> #include <mach/at91_mci.h> diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 4487cc09791..0aecaaebef3 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -2013,7 +2013,7 @@ static struct platform_driver omap_hsmmc_driver = { static int __init omap_hsmmc_init(void) { /* Register the MMC driver */ - return platform_driver_register(&omap_hsmmc_driver); + return platform_driver_probe(&omap_hsmmc_driver, omap_hsmmc_probe); } static void __exit omap_hsmmc_cleanup(void) diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index e1f7d0a78b9..14cec04c34f 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c @@ -42,6 +42,7 @@ #include <linux/log2.h> #include <linux/kthread.h> #include <linux/reboot.h> +#include <linux/kernel.h> #include "ubi.h" /* Maximum length of the 'mtd=' parameter */ @@ -1257,7 +1258,7 @@ static int __init bytes_str_to_int(const char *str) unsigned long result; result = simple_strtoul(str, &endp, 0); - if (str == endp || result < 0) { + if (str == endp || result >= INT_MAX) { printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n", str); return -EINVAL; diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c index e7161adc419..90af61a2c3e 100644 --- a/drivers/mtd/ubi/scan.c +++ 
b/drivers/mtd/ubi/scan.c @@ -794,16 +794,15 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, * number. */ image_seq = be32_to_cpu(ech->image_seq); - if (!si->image_seq_set) { + if (!ubi->image_seq && image_seq) ubi->image_seq = image_seq; - si->image_seq_set = 1; - } else if (ubi->image_seq && ubi->image_seq != image_seq) { + if (ubi->image_seq && image_seq && + ubi->image_seq != image_seq) { ubi_err("bad image sequence number %d in PEB %d, " "expected %d", image_seq, pnum, ubi->image_seq); ubi_dbg_dump_ec_hdr(ech); return -EINVAL; } - } /* OK, we've done with the EC header, let's look at the VID header */ diff --git a/drivers/mtd/ubi/scan.h b/drivers/mtd/ubi/scan.h index bab31695dac..ff179ad7ca5 100644 --- a/drivers/mtd/ubi/scan.h +++ b/drivers/mtd/ubi/scan.h @@ -103,7 +103,6 @@ struct ubi_scan_volume { * @ec_sum: a temporary variable used when calculating @mean_ec * @ec_count: a temporary variable used when calculating @mean_ec * @corr_count: count of corrupted PEBs - * @image_seq_set: indicates @ubi->image_seq is known * * This data structure contains the result of scanning and may be used by other * UBI sub-systems to build final UBI data structures, further error-recovery @@ -127,7 +126,6 @@ struct ubi_scan_info { uint64_t ec_sum; int ec_count; int corr_count; - int image_seq_set; }; struct ubi_device; diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 712776089b4..e19ca4bb751 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -1741,6 +1741,7 @@ config KS8851 config KS8851_MLL tristate "Micrel KS8851 MLL" depends on HAS_IOMEM + select MII help This platform driver is for Micrel KS8851 Address/data bus multiplexed network chip. @@ -2482,6 +2483,8 @@ config S6GMAC To compile this driver as a module, choose M here. The module will be called s6gmac. +source "drivers/net/stmmac/Kconfig" + endif # NETDEV_1000 # @@ -3230,4 +3233,12 @@ config VIRTIO_NET This is the virtual network driver for virtio. It can be used with lguest or QEMU based VMMs (like KVM or Xen). Say Y or M. +config VMXNET3 + tristate "VMware VMXNET3 ethernet driver" + depends on PCI && X86 && INET + help + This driver supports VMware's vmxnet3 virtual ethernet NIC. + To compile this driver as a module, choose M here: the + module will be called vmxnet3. + endif # NETDEVICES diff --git a/drivers/net/Makefile b/drivers/net/Makefile index d866b8cf65d..246323d7f16 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -2,6 +2,10 @@ # Makefile for the Linux network (ethercard) device drivers. 
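On the UBI scan change earlier in this section: process_eb() no longer tracks a separate image_seq_set flag; an image sequence number of 0 in the EC header is simply treated as unknown, the first non-zero value seen is adopted, and only two conflicting non-zero values abort the scan. A hedged userspace sketch of that check (helper and variable names are mine):

#include <stdio.h>

static unsigned int ubi_image_seq;	/* 0 means not yet known */

static int check_peb_image_seq(unsigned int image_seq, int pnum)
{
	if (!ubi_image_seq && image_seq)
		ubi_image_seq = image_seq;	/* adopt the first real value */
	if (ubi_image_seq && image_seq && ubi_image_seq != image_seq) {
		fprintf(stderr, "bad image sequence number %u in PEB %d, expected %u\n",
			image_seq, pnum, ubi_image_seq);
		return -1;			/* the driver returns -EINVAL here */
	}
	return 0;
}

int main(void)
{
	check_peb_image_seq(0, 0);	/* old-format PEB with seq 0: accepted */
	check_peb_image_seq(0x1234, 1);	/* first non-zero value is adopted */
	check_peb_image_seq(0x1234, 2);	/* consistent: fine */
	if (check_peb_image_seq(0xbeef, 3))
		printf("mismatch detected, scan fails\n");
	return 0;
}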
# +obj-$(CONFIG_MII) += mii.o +obj-$(CONFIG_MDIO) += mdio.o +obj-$(CONFIG_PHYLIB) += phy/ + obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o obj-$(CONFIG_E1000) += e1000/ @@ -26,6 +30,7 @@ obj-$(CONFIG_TEHUTI) += tehuti.o obj-$(CONFIG_ENIC) += enic/ obj-$(CONFIG_JME) += jme.o obj-$(CONFIG_BE2NET) += benet/ +obj-$(CONFIG_VMXNET3) += vmxnet3/ gianfar_driver-objs := gianfar.o \ gianfar_ethtool.o \ @@ -95,15 +100,12 @@ obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o obj-$(CONFIG_RIONET) += rionet.o obj-$(CONFIG_SH_ETH) += sh_eth.o +obj-$(CONFIG_STMMAC_ETH) += stmmac/ # # end link order section # -obj-$(CONFIG_MII) += mii.o -obj-$(CONFIG_MDIO) += mdio.o -obj-$(CONFIG_PHYLIB) += phy/ - obj-$(CONFIG_SUNDANCE) += sundance.o obj-$(CONFIG_HAMACHI) += hamachi.o obj-$(CONFIG_NET) += Space.o loopback.o diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c index 5f0b05c2d71..d82a9a99475 100644 --- a/drivers/net/acenic.c +++ b/drivers/net/acenic.c @@ -1209,7 +1209,8 @@ static int __devinit ace_init(struct net_device *dev) memset(ap->info, 0, sizeof(struct ace_info)); memset(ap->skb, 0, sizeof(struct ace_skb)); - if (ace_load_firmware(dev)) + ecode = ace_load_firmware(dev); + if (ecode) goto init_error; ap->fw_running = 0; diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c index 04f63c77071..ce6f1ac25df 100644 --- a/drivers/net/au1000_eth.c +++ b/drivers/net/au1000_eth.c @@ -34,6 +34,7 @@ * * */ +#include <linux/capability.h> #include <linux/dma-mapping.h> #include <linux/module.h> #include <linux/kernel.h> diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c index 89876ade5e3..28a0eda9268 100644 --- a/drivers/net/benet/be_cmds.c +++ b/drivers/net/benet/be_cmds.c @@ -243,15 +243,26 @@ static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage) int be_cmd_POST(struct be_adapter *adapter) { - u16 stage, error; + u16 stage; + int status, timeout = 0; - error = be_POST_stage_get(adapter, &stage); - if (error || stage != POST_STAGE_ARMFW_RDY) { - dev_err(&adapter->pdev->dev, "POST failed.\n"); - return -1; - } + do { + status = be_POST_stage_get(adapter, &stage); + if (status) { + dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n", + stage); + return -1; + } else if (stage != POST_STAGE_ARMFW_RDY) { + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(2 * HZ); + timeout += 2; + } else { + return 0; + } + } while (timeout < 20); - return 0; + dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage); + return -1; } static inline void *embedded_payload(struct be_mcc_wrb *wrb) @@ -729,8 +740,8 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, /* Create an rx filtering policy configuration on an i/f * Uses mbox */ -int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac, - bool pmac_invalid, u32 *if_handle, u32 *pmac_id) +int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, + u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id) { struct be_mcc_wrb *wrb; struct be_cmd_req_if_create *req; @@ -746,8 +757,8 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 flags, u8 *mac, be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req)); - req->capability_flags = cpu_to_le32(flags); - req->enable_flags = cpu_to_le32(flags); + req->capability_flags = cpu_to_le32(cap_flags); + req->enable_flags = cpu_to_le32(en_flags); req->pmac_invalid = pmac_invalid; if (!pmac_invalid) memcpy(req->mac_addr, mac, 
ETH_ALEN); diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h index a86f917f85f..49953787e41 100644 --- a/drivers/net/benet/be_cmds.h +++ b/drivers/net/benet/be_cmds.h @@ -720,8 +720,9 @@ extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, u32 if_id, u32 *pmac_id); extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id); -extern int be_cmd_if_create(struct be_adapter *adapter, u32 if_flags, u8 *mac, - bool pmac_invalid, u32 *if_handle, u32 *pmac_id); +extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, + u32 en_flags, u8 *mac, bool pmac_invalid, + u32 *if_handle, u32 *pmac_id); extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle); extern int be_cmd_eq_create(struct be_adapter *adapter, struct be_queue_info *eq, int eq_delay); diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c index 6d5e81f7046..1f941f02771 100644 --- a/drivers/net/benet/be_main.c +++ b/drivers/net/benet/be_main.c @@ -1620,19 +1620,22 @@ static int be_open(struct net_device *netdev) static int be_setup(struct be_adapter *adapter) { struct net_device *netdev = adapter->netdev; - u32 if_flags; + u32 cap_flags, en_flags; int status; - if_flags = BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PROMISCUOUS | - BE_IF_FLAGS_MCAST_PROMISCUOUS | BE_IF_FLAGS_UNTAGGED | - BE_IF_FLAGS_PASS_L3L4_ERRORS; - status = be_cmd_if_create(adapter, if_flags, netdev->dev_addr, - false/* pmac_invalid */, &adapter->if_handle, - &adapter->pmac_id); + cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | + BE_IF_FLAGS_MCAST_PROMISCUOUS | + BE_IF_FLAGS_PROMISCUOUS | + BE_IF_FLAGS_PASS_L3L4_ERRORS; + en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | + BE_IF_FLAGS_PASS_L3L4_ERRORS; + + status = be_cmd_if_create(adapter, cap_flags, en_flags, + netdev->dev_addr, false/* pmac_invalid */, + &adapter->if_handle, &adapter->pmac_id); if (status != 0) goto do_none; - status = be_tx_queues_create(adapter); if (status != 0) goto if_destroy; @@ -2055,6 +2058,10 @@ static int be_hw_up(struct be_adapter *adapter) if (status) return status; + status = be_cmd_reset_function(adapter); + if (status) + return status; + status = be_cmd_get_fw_ver(adapter, adapter->fw_ver); if (status) return status; @@ -2108,10 +2115,6 @@ static int __devinit be_probe(struct pci_dev *pdev, if (status) goto free_netdev; - status = be_cmd_reset_function(adapter); - if (status) - goto ctrl_clean; - status = be_stats_init(adapter); if (status) goto ctrl_clean; diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h index 6c7f795d12d..a4d83409f20 100644 --- a/drivers/net/bnx2.h +++ b/drivers/net/bnx2.h @@ -361,9 +361,12 @@ struct l2_fhdr { #define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE (1<<28) #define BNX2_L2CTX_HOST_BDIDX 0x00000004 -#define BNX2_L2CTX_STATUSB_NUM_SHIFT 16 -#define BNX2_L2CTX_STATUSB_NUM(sb_id) \ - (((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_STATUSB_NUM_SHIFT) : 0) +#define BNX2_L2CTX_L5_STATUSB_NUM_SHIFT 16 +#define BNX2_L2CTX_L2_STATUSB_NUM_SHIFT 24 +#define BNX2_L2CTX_L5_STATUSB_NUM(sb_id) \ + (((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_L5_STATUSB_NUM_SHIFT) : 0) +#define BNX2_L2CTX_L2_STATUSB_NUM(sb_id) \ + (((sb_id) > 0) ? 
(((sb_id) + 7) << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT) : 0) #define BNX2_L2CTX_HOST_BSEQ 0x00000008 #define BNX2_L2CTX_NX_BSEQ 0x0000000c #define BNX2_L2CTX_NX_BDHADDR_HI 0x00000010 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 69c5b15e22d..40fb5eefc72 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -691,7 +691,7 @@ static int bond_check_dev_link(struct bonding *bond, struct net_device *slave_dev, int reporting) { const struct net_device_ops *slave_ops = slave_dev->netdev_ops; - static int (*ioctl)(struct net_device *, struct ifreq *, int); + int (*ioctl)(struct net_device *, struct ifreq *, int); struct ifreq ifr; struct mii_ioctl_data *mii; @@ -3665,10 +3665,10 @@ static int bond_xmit_hash_policy_l23(struct sk_buff *skb, if (skb->protocol == htons(ETH_P_IP)) { return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^ - (data->h_dest[5] ^ bond_dev->dev_addr[5])) % count; + (data->h_dest[5] ^ data->h_source[5])) % count; } - return (data->h_dest[5] ^ bond_dev->dev_addr[5]) % count; + return (data->h_dest[5] ^ data->h_source[5]) % count; } /* @@ -3695,7 +3695,7 @@ static int bond_xmit_hash_policy_l34(struct sk_buff *skb, } - return (data->h_dest[5] ^ bond_dev->dev_addr[5]) % count; + return (data->h_dest[5] ^ data->h_source[5]) % count; } /* @@ -3706,7 +3706,7 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb, { struct ethhdr *data = (struct ethhdr *)skb->data; - return (data->h_dest[5] ^ bond_dev->dev_addr[5]) % count; + return (data->h_dest[5] ^ data->h_source[5]) % count; } /*-------------------------- Device entry points ----------------------------*/ diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c index 3373560405b..9dd076a626a 100644 --- a/drivers/net/can/sja1000/sja1000_of_platform.c +++ b/drivers/net/can/sja1000/sja1000_of_platform.c @@ -213,6 +213,7 @@ static struct of_device_id __devinitdata sja1000_ofp_table[] = { {.compatible = "nxp,sja1000"}, {}, }; +MODULE_DEVICE_TABLE(of, sja1000_ofp_table); static struct of_platform_driver sja1000_ofp_driver = { .owner = THIS_MODULE, diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c index 46c87ec7960..3bf1b04f2ca 100644 --- a/drivers/net/cnic.c +++ b/drivers/net/cnic.c @@ -2264,9 +2264,9 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev) cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val); if (sb_id == 0) - val = 2 << BNX2_L2CTX_STATUSB_NUM_SHIFT; + val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT; else - val = BNX2_L2CTX_STATUSB_NUM(sb_id); + val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id); cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val); rxbd = (struct rx_bd *) (cp->l2_ring + BCM_PAGE_SIZE); @@ -2423,7 +2423,7 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev) cp->int_num = 0; if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { u32 sb_id = cp->status_blk_num; - u32 sb = BNX2_L2CTX_STATUSB_NUM(sb_id); + u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id); cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT; cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb); diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c index 65a2d0ba64e..f72c56dec33 100644 --- a/drivers/net/davinci_emac.c +++ b/drivers/net/davinci_emac.c @@ -333,6 +333,9 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1"; #define EMAC_DM646X_MAC_EOI_C0_RXEN (0x01) #define EMAC_DM646X_MAC_EOI_C0_TXEN (0x02) +/* EMAC Stats Clear Mask */ +#define EMAC_STATS_CLR_MASK (0xFFFFFFFF) + /** net_buf_obj: 
EMAC network bufferdata structure * * EMAC network buffer data structure @@ -2548,40 +2551,49 @@ static int emac_dev_stop(struct net_device *ndev) static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev) { struct emac_priv *priv = netdev_priv(ndev); + u32 mac_control; + u32 stats_clear_mask; /* update emac hardware stats and reset the registers*/ + mac_control = emac_read(EMAC_MACCONTROL); + + if (mac_control & EMAC_MACCONTROL_GMIIEN) + stats_clear_mask = EMAC_STATS_CLR_MASK; + else + stats_clear_mask = 0; + priv->net_dev_stats.multicast += emac_read(EMAC_RXMCASTFRAMES); - emac_write(EMAC_RXMCASTFRAMES, EMAC_ALL_MULTI_REG_VALUE); + emac_write(EMAC_RXMCASTFRAMES, stats_clear_mask); priv->net_dev_stats.collisions += (emac_read(EMAC_TXCOLLISION) + emac_read(EMAC_TXSINGLECOLL) + emac_read(EMAC_TXMULTICOLL)); - emac_write(EMAC_TXCOLLISION, EMAC_ALL_MULTI_REG_VALUE); - emac_write(EMAC_TXSINGLECOLL, EMAC_ALL_MULTI_REG_VALUE); - emac_write(EMAC_TXMULTICOLL, EMAC_ALL_MULTI_REG_VALUE); + emac_write(EMAC_TXCOLLISION, stats_clear_mask); + emac_write(EMAC_TXSINGLECOLL, stats_clear_mask); + emac_write(EMAC_TXMULTICOLL, stats_clear_mask); priv->net_dev_stats.rx_length_errors += (emac_read(EMAC_RXOVERSIZED) + emac_read(EMAC_RXJABBER) + emac_read(EMAC_RXUNDERSIZED)); - emac_write(EMAC_RXOVERSIZED, EMAC_ALL_MULTI_REG_VALUE); - emac_write(EMAC_RXJABBER, EMAC_ALL_MULTI_REG_VALUE); - emac_write(EMAC_RXUNDERSIZED, EMAC_ALL_MULTI_REG_VALUE); + emac_write(EMAC_RXOVERSIZED, stats_clear_mask); + emac_write(EMAC_RXJABBER, stats_clear_mask); + emac_write(EMAC_RXUNDERSIZED, stats_clear_mask); priv->net_dev_stats.rx_over_errors += (emac_read(EMAC_RXSOFOVERRUNS) + emac_read(EMAC_RXMOFOVERRUNS)); - emac_write(EMAC_RXSOFOVERRUNS, EMAC_ALL_MULTI_REG_VALUE); - emac_write(EMAC_RXMOFOVERRUNS, EMAC_ALL_MULTI_REG_VALUE); + emac_write(EMAC_RXSOFOVERRUNS, stats_clear_mask); + emac_write(EMAC_RXMOFOVERRUNS, stats_clear_mask); priv->net_dev_stats.rx_fifo_errors += emac_read(EMAC_RXDMAOVERRUNS); - emac_write(EMAC_RXDMAOVERRUNS, EMAC_ALL_MULTI_REG_VALUE); + emac_write(EMAC_RXDMAOVERRUNS, stats_clear_mask); priv->net_dev_stats.tx_carrier_errors += emac_read(EMAC_TXCARRIERSENSE); - emac_write(EMAC_TXCARRIERSENSE, EMAC_ALL_MULTI_REG_VALUE); + emac_write(EMAC_TXCARRIERSENSE, stats_clear_mask); priv->net_dev_stats.tx_fifo_errors = emac_read(EMAC_TXUNDERRUN); - emac_write(EMAC_TXUNDERRUN, EMAC_ALL_MULTI_REG_VALUE); + emac_write(EMAC_TXUNDERRUN, stats_clear_mask); return &priv->net_dev_stats; } diff --git a/drivers/net/dm9000.h b/drivers/net/dm9000.h index 80817c2edfb..fb1c924d79b 100644 --- a/drivers/net/dm9000.h +++ b/drivers/net/dm9000.h @@ -50,7 +50,7 @@ #define DM9000_RCSR 0x32 #define CHIPR_DM9000A 0x19 -#define CHIPR_DM9000B 0x1B +#define CHIPR_DM9000B 0x1A #define DM9000_MRCMDX 0xF0 #define DM9000_MRCMD 0xF2 diff --git a/drivers/net/e100.c b/drivers/net/e100.c index 5d2f48f0225..3c29a20b751 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c @@ -1427,19 +1427,31 @@ static int e100_phy_init(struct nic *nic) } else DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id); - /* Isolate all the PHY ids */ - for (addr = 0; addr < 32; addr++) - mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE); - /* Select the discovered PHY */ - bmcr &= ~BMCR_ISOLATE; - mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr); - /* Get phy ID */ id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1); id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2); nic->phy = (u32)id_hi << 16 | (u32)id_lo; DPRINTK(HW, DEBUG, "phy ID = 
0x%08X\n", nic->phy); + /* Select the phy and isolate the rest */ + for (addr = 0; addr < 32; addr++) { + if (addr != nic->mii.phy_id) { + mdio_write(netdev, addr, MII_BMCR, BMCR_ISOLATE); + } else if (nic->phy != phy_82552_v) { + bmcr = mdio_read(netdev, addr, MII_BMCR); + mdio_write(netdev, addr, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + } + } + /* + * Workaround for 82552: + * Clear the ISOLATE bit on selected phy_id last (mirrored on all + * other phy_id's) using bmcr value from addr discovery loop above. + */ + if (nic->phy == phy_82552_v) + mdio_write(netdev, nic->mii.phy_id, MII_BMCR, + bmcr & ~BMCR_ISOLATE); + /* Handle National tx phys */ #define NCS_PHY_MODEL_MASK 0xFFF0FFFF if ((nic->phy & NCS_PHY_MODEL_MASK) == phy_nsc_tx) { diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h index c0f185beb8b..1190167a8b3 100644 --- a/drivers/net/e1000e/defines.h +++ b/drivers/net/e1000e/defines.h @@ -76,6 +76,7 @@ /* Extended Device Control */ #define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */ #define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ +#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ #define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ #define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clock Gating */ #define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 @@ -347,6 +348,7 @@ /* Extended Configuration Control and Size */ #define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 #define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008 #define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 #define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000 #define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16 diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h index 981936c1fb4..189dfa2d6c7 100644 --- a/drivers/net/e1000e/e1000.h +++ b/drivers/net/e1000e/e1000.h @@ -141,6 +141,20 @@ struct e1000_info; #define HV_TNCRS_UPPER PHY_REG(778, 29) /* Transmit with no CRS */ #define HV_TNCRS_LOWER PHY_REG(778, 30) +/* BM PHY Copper Specific Status */ +#define BM_CS_STATUS 17 +#define BM_CS_STATUS_LINK_UP 0x0400 +#define BM_CS_STATUS_RESOLVED 0x0800 +#define BM_CS_STATUS_SPEED_MASK 0xC000 +#define BM_CS_STATUS_SPEED_1000 0x8000 + +/* 82577 Mobile Phy Status Register */ +#define HV_M_STATUS 26 +#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000 +#define HV_M_STATUS_SPEED_MASK 0x0300 +#define HV_M_STATUS_SPEED_1000 0x0200 +#define HV_M_STATUS_LINK_UP 0x0040 + enum e1000_boards { board_82571, board_82572, @@ -519,9 +533,13 @@ extern s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw); extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw); extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw); extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); +extern s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, + u16 *data); extern s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw); extern s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active); extern s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); +extern s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, + u16 data); extern s32 e1000e_phy_sw_reset(struct e1000_hw *hw); extern s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw); extern s32 e1000e_get_cfg_done(struct e1000_hw *hw); @@ -538,7 +556,11 @@ extern s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data); extern s32 
e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data); extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); +extern s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, + u16 data); extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data); +extern s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, + u16 *data); extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, u32 usec_interval, bool *success); extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw); @@ -546,7 +568,11 @@ extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); extern s32 e1000e_check_downshift(struct e1000_hw *hw); extern s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data); +extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, + u16 *data); extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data); +extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, + u16 data); extern s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow); extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw); extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw); diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h index fd44d9f9076..aaea41ef794 100644 --- a/drivers/net/e1000e/hw.h +++ b/drivers/net/e1000e/hw.h @@ -764,11 +764,13 @@ struct e1000_phy_operations { s32 (*get_cable_length)(struct e1000_hw *); s32 (*get_phy_info)(struct e1000_hw *); s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *); + s32 (*read_phy_reg_locked)(struct e1000_hw *, u32, u16 *); void (*release_phy)(struct e1000_hw *); s32 (*reset_phy)(struct e1000_hw *); s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); s32 (*write_phy_reg)(struct e1000_hw *, u32, u16); + s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16); s32 (*cfg_on_link_up)(struct e1000_hw *); }; @@ -901,6 +903,7 @@ struct e1000_shadow_ram { struct e1000_dev_spec_ich8lan { bool kmrn_lock_loss_workaround_enabled; struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS]; + bool nvm_k1_enabled; }; struct e1000_hw { diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index 99df2abf82a..51ddb04ab19 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c @@ -122,6 +122,27 @@ #define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */ +#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */ + +/* SMBus Address Phy Register */ +#define HV_SMB_ADDR PHY_REG(768, 26) +#define HV_SMB_ADDR_PEC_EN 0x0200 +#define HV_SMB_ADDR_VALID 0x0080 + +/* Strapping Option Register - RO */ +#define E1000_STRAP 0x0000C +#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000 +#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17 + +/* OEM Bits Phy Register */ +#define HV_OEM_BITS PHY_REG(768, 25) +#define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */ +#define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */ +#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */ + +#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */ +#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */ + /* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ /* Offset 04h HSFSTS */ union ich8_hws_flash_status { @@ -200,6 
+221,10 @@ static s32 e1000_setup_led_pchlan(struct e1000_hw *hw); static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw); static s32 e1000_led_on_pchlan(struct e1000_hw *hw); static s32 e1000_led_off_pchlan(struct e1000_hw *hw); +static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active); +static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw); +static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); +static s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable); static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) { @@ -242,7 +267,11 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan; phy->ops.read_phy_reg = e1000_read_phy_reg_hv; + phy->ops.read_phy_reg_locked = e1000_read_phy_reg_hv_locked; + phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan; + phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan; phy->ops.write_phy_reg = e1000_write_phy_reg_hv; + phy->ops.write_phy_reg_locked = e1000_write_phy_reg_hv_locked; phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; phy->id = e1000_phy_unknown; @@ -303,6 +332,8 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) case IGP03E1000_E_PHY_ID: phy->type = e1000_phy_igp_3; phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->ops.read_phy_reg_locked = e1000e_read_phy_reg_igp_locked; + phy->ops.write_phy_reg_locked = e1000e_write_phy_reg_igp_locked; break; case IFE_E_PHY_ID: case IFE_PLUS_E_PHY_ID: @@ -469,14 +500,6 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) goto out; } - if (hw->mac.type == e1000_pchlan) { - ret_val = e1000e_write_kmrn_reg(hw, - E1000_KMRNCTRLSTA_K1_CONFIG, - E1000_KMRNCTRLSTA_K1_ENABLE); - if (ret_val) - goto out; - } - /* * First we want to see if the MII Status Register reports * link. If so, then we want to get the current speed/duplex @@ -486,6 +509,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) if (ret_val) goto out; + if (hw->mac.type == e1000_pchlan) { + ret_val = e1000_k1_gig_workaround_hv(hw, link); + if (ret_val) + goto out; + } + if (!link) goto out; /* No link detected */ @@ -568,12 +597,39 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) static DEFINE_MUTEX(nvm_mutex); /** + * e1000_acquire_nvm_ich8lan - Acquire NVM mutex + * @hw: pointer to the HW structure + * + * Acquires the mutex for performing NVM operations. + **/ +static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw) +{ + mutex_lock(&nvm_mutex); + + return 0; +} + +/** + * e1000_release_nvm_ich8lan - Release NVM mutex + * @hw: pointer to the HW structure + * + * Releases the mutex used while performing NVM operations. + **/ +static void e1000_release_nvm_ich8lan(struct e1000_hw *hw) +{ + mutex_unlock(&nvm_mutex); + + return; +} + +static DEFINE_MUTEX(swflag_mutex); + +/** * e1000_acquire_swflag_ich8lan - Acquire software control flag * @hw: pointer to the HW structure * - * Acquires the software control flag for performing NVM and PHY - * operations. This is a function pointer entry point only called by - * read/write routines for the PHY and NVM parts. + * Acquires the software control flag for performing PHY and select + * MAC CSR accesses. 
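[Editor's note] The ich8lan hunks above split what used to be a single lock into two independent ones: plain mutex helpers now guard NVM operations, while the hardware software-control flag (guarded by its own swflag_mutex) covers PHY and select MAC CSR accesses. The following is only a rough, self-contained sketch of that locking split in plain C with pthreads; the names and the comments about the hardware flag are illustrative, not the driver's real API.

```c
/*
 * Editorial sketch only: two independent locks, mirroring the split of
 * nvm_mutex (NVM operations) from swflag_mutex (PHY/CSR software flag)
 * in the hunks above.  Plain C with pthreads; not kernel code.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t nvm_mutex    = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t swflag_mutex = PTHREAD_MUTEX_INITIALIZER;

/* NVM accessors now take only the NVM mutex... */
static int acquire_nvm(void)
{
	pthread_mutex_lock(&nvm_mutex);
	return 0;
}

static void release_nvm(void)
{
	pthread_mutex_unlock(&nvm_mutex);
}

/*
 * ...while PHY/CSR accesses take the software-flag mutex, so an NVM
 * read no longer serializes against a PHY register access.  (The real
 * driver additionally sets and polls a hardware SWFLAG bit here.)
 */
static int acquire_swflag(void)
{
	pthread_mutex_lock(&swflag_mutex);
	return 0;
}

static void release_swflag(void)
{
	pthread_mutex_unlock(&swflag_mutex);
}

int main(void)
{
	acquire_nvm();
	printf("reading NVM word...\n");
	release_nvm();

	acquire_swflag();
	printf("writing PHY register...\n");
	release_swflag();
	return 0;
}
```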
**/ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) { @@ -582,7 +638,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) might_sleep(); - mutex_lock(&nvm_mutex); + mutex_lock(&swflag_mutex); while (timeout) { extcnf_ctrl = er32(EXTCNF_CTRL); @@ -599,7 +655,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) goto out; } - timeout = PHY_CFG_TIMEOUT * 2; + timeout = SW_FLAG_TIMEOUT; extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; ew32(EXTCNF_CTRL, extcnf_ctrl); @@ -623,7 +679,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) out: if (ret_val) - mutex_unlock(&nvm_mutex); + mutex_unlock(&swflag_mutex); return ret_val; } @@ -632,9 +688,8 @@ out: * e1000_release_swflag_ich8lan - Release software control flag * @hw: pointer to the HW structure * - * Releases the software control flag for performing NVM and PHY operations. - * This is a function pointer entry point only called by read/write - * routines for the PHY and NVM parts. + * Releases the software control flag for performing PHY and select + * MAC CSR accesses. **/ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw) { @@ -644,7 +699,9 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw) extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; ew32(EXTCNF_CTRL, extcnf_ctrl); - mutex_unlock(&nvm_mutex); + mutex_unlock(&swflag_mutex); + + return; } /** @@ -752,6 +809,326 @@ static s32 e1000_phy_force_speed_duplex_ich8lan(struct e1000_hw *hw) } /** + * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration + * @hw: pointer to the HW structure + * + * SW should configure the LCD from the NVM extended configuration region + * as a workaround for certain parts. + **/ +static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask; + s32 ret_val; + u16 word_addr, reg_data, reg_addr, phy_page = 0; + + ret_val = hw->phy.ops.acquire_phy(hw); + if (ret_val) + return ret_val; + + /* + * Initialize the PHY from the NVM on ICH platforms. This + * is needed due to an issue where the NVM configuration is + * not properly autoloaded after power transitions. + * Therefore, after each PHY reset, we will load the + * configuration data out of the NVM manually. 
+ */ + if ((hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) || + (hw->mac.type == e1000_pchlan)) { + struct e1000_adapter *adapter = hw->adapter; + + /* Check if SW needs to configure the PHY */ + if ((adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M_AMT) || + (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M) || + (hw->mac.type == e1000_pchlan)) + sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; + else + sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; + + data = er32(FEXTNVM); + if (!(data & sw_cfg_mask)) + goto out; + + /* Wait for basic configuration completes before proceeding */ + e1000_lan_init_done_ich8lan(hw); + + /* + * Make sure HW does not configure LCD from PHY + * extended configuration before SW configuration + */ + data = er32(EXTCNF_CTRL); + if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) + goto out; + + cnf_size = er32(EXTCNF_SIZE); + cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK; + cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT; + if (!cnf_size) + goto out; + + cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; + cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; + + if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) && + (hw->mac.type == e1000_pchlan)) { + /* + * HW configures the SMBus address and LEDs when the + * OEM and LCD Write Enable bits are set in the NVM. + * When both NVM bits are cleared, SW will configure + * them instead. + */ + data = er32(STRAP); + data &= E1000_STRAP_SMBUS_ADDRESS_MASK; + reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT; + reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; + ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, + reg_data); + if (ret_val) + goto out; + + data = er32(LEDCTL); + ret_val = e1000_write_phy_reg_hv_locked(hw, + HV_LED_CONFIG, + (u16)data); + if (ret_val) + goto out; + } + /* Configure LCD from extended configuration region. */ + + /* cnf_base_addr is in DWORD */ + word_addr = (u16)(cnf_base_addr << 1); + + for (i = 0; i < cnf_size; i++) { + ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, + ®_data); + if (ret_val) + goto out; + + ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1), + 1, ®_addr); + if (ret_val) + goto out; + + /* Save off the PHY page for future writes. */ + if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) { + phy_page = reg_data; + continue; + } + + reg_addr &= PHY_REG_MASK; + reg_addr |= phy_page; + + ret_val = phy->ops.write_phy_reg_locked(hw, + (u32)reg_addr, + reg_data); + if (ret_val) + goto out; + } + } + +out: + hw->phy.ops.release_phy(hw); + return ret_val; +} + +/** + * e1000_k1_gig_workaround_hv - K1 Si workaround + * @hw: pointer to the HW structure + * @link: link up bool flag + * + * If K1 is enabled for 1Gbps, the MAC might stall when transitioning + * from a lower speed. This workaround disables K1 whenever link is at 1Gig + * If link is down, the function will restore the default K1 setting located + * in the NVM. 
+ **/ +static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) +{ + s32 ret_val = 0; + u16 status_reg = 0; + bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled; + + if (hw->mac.type != e1000_pchlan) + goto out; + + /* Wrap the whole flow with the sw flag */ + ret_val = hw->phy.ops.acquire_phy(hw); + if (ret_val) + goto out; + + /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */ + if (link) { + if (hw->phy.type == e1000_phy_82578) { + ret_val = hw->phy.ops.read_phy_reg_locked(hw, + BM_CS_STATUS, + &status_reg); + if (ret_val) + goto release; + + status_reg &= BM_CS_STATUS_LINK_UP | + BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_MASK; + + if (status_reg == (BM_CS_STATUS_LINK_UP | + BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_1000)) + k1_enable = false; + } + + if (hw->phy.type == e1000_phy_82577) { + ret_val = hw->phy.ops.read_phy_reg_locked(hw, + HV_M_STATUS, + &status_reg); + if (ret_val) + goto release; + + status_reg &= HV_M_STATUS_LINK_UP | + HV_M_STATUS_AUTONEG_COMPLETE | + HV_M_STATUS_SPEED_MASK; + + if (status_reg == (HV_M_STATUS_LINK_UP | + HV_M_STATUS_AUTONEG_COMPLETE | + HV_M_STATUS_SPEED_1000)) + k1_enable = false; + } + + /* Link stall fix for link up */ + ret_val = hw->phy.ops.write_phy_reg_locked(hw, PHY_REG(770, 19), + 0x0100); + if (ret_val) + goto release; + + } else { + /* Link stall fix for link down */ + ret_val = hw->phy.ops.write_phy_reg_locked(hw, PHY_REG(770, 19), + 0x4100); + if (ret_val) + goto release; + } + + ret_val = e1000_configure_k1_ich8lan(hw, k1_enable); + +release: + hw->phy.ops.release_phy(hw); +out: + return ret_val; +} + +/** + * e1000_configure_k1_ich8lan - Configure K1 power state + * @hw: pointer to the HW structure + * @enable: K1 state to configure + * + * Configure the K1 power state based on the provided parameter. + * Assumes semaphore already acquired. + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + **/ +static s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable) +{ + s32 ret_val = 0; + u32 ctrl_reg = 0; + u32 ctrl_ext = 0; + u32 reg = 0; + u16 kmrn_reg = 0; + + ret_val = e1000e_read_kmrn_reg_locked(hw, + E1000_KMRNCTRLSTA_K1_CONFIG, + &kmrn_reg); + if (ret_val) + goto out; + + if (k1_enable) + kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE; + else + kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE; + + ret_val = e1000e_write_kmrn_reg_locked(hw, + E1000_KMRNCTRLSTA_K1_CONFIG, + kmrn_reg); + if (ret_val) + goto out; + + udelay(20); + ctrl_ext = er32(CTRL_EXT); + ctrl_reg = er32(CTRL); + + reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + reg |= E1000_CTRL_FRCSPD; + ew32(CTRL, reg); + + ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS); + udelay(20); + ew32(CTRL, ctrl_reg); + ew32(CTRL_EXT, ctrl_ext); + udelay(20); + +out: + return ret_val; +} + +/** + * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration + * @hw: pointer to the HW structure + * @d0_state: boolean if entering d0 or d3 device state + * + * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are + * collectively called OEM bits. The OEM Write Enable bit and SW Config bit + * in NVM determines whether HW should configure LPLU and Gbe Disable. 
+ **/ +static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) +{ + s32 ret_val = 0; + u32 mac_reg; + u16 oem_reg; + + if (hw->mac.type != e1000_pchlan) + return ret_val; + + ret_val = hw->phy.ops.acquire_phy(hw); + if (ret_val) + return ret_val; + + mac_reg = er32(EXTCNF_CTRL); + if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) + goto out; + + mac_reg = er32(FEXTNVM); + if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M)) + goto out; + + mac_reg = er32(PHY_CTRL); + + ret_val = hw->phy.ops.read_phy_reg_locked(hw, HV_OEM_BITS, &oem_reg); + if (ret_val) + goto out; + + oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU); + + if (d0_state) { + if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE) + oem_reg |= HV_OEM_BITS_GBE_DIS; + + if (mac_reg & E1000_PHY_CTRL_D0A_LPLU) + oem_reg |= HV_OEM_BITS_LPLU; + } else { + if (mac_reg & E1000_PHY_CTRL_NOND0A_GBE_DISABLE) + oem_reg |= HV_OEM_BITS_GBE_DIS; + + if (mac_reg & E1000_PHY_CTRL_NOND0A_LPLU) + oem_reg |= HV_OEM_BITS_LPLU; + } + /* Restart auto-neg to activate the bits */ + oem_reg |= HV_OEM_BITS_RESTART_AN; + ret_val = hw->phy.ops.write_phy_reg_locked(hw, HV_OEM_BITS, oem_reg); + +out: + hw->phy.ops.release_phy(hw); + + return ret_val; +} + + +/** * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be * done after every PHY reset. **/ @@ -791,10 +1168,20 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) ret_val = hw->phy.ops.acquire_phy(hw); if (ret_val) return ret_val; + hw->phy.addr = 1; - e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0); + ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0); + if (ret_val) + goto out; hw->phy.ops.release_phy(hw); + /* + * Configure the K1 Si workaround during phy reset assuming there is + * link so that it disables K1 if link is in 1Gbps. + */ + ret_val = e1000_k1_gig_workaround_hv(hw, true); + +out: return ret_val; } @@ -840,11 +1227,8 @@ static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw) **/ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) { - struct e1000_phy_info *phy = &hw->phy; - u32 i; - u32 data, cnf_size, cnf_base_addr, sw_cfg_mask; - s32 ret_val; - u16 word_addr, reg_data, reg_addr, phy_page = 0; + s32 ret_val = 0; + u16 reg; ret_val = e1000e_phy_hw_reset_generic(hw); if (ret_val) @@ -859,81 +1243,20 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) return ret_val; } - /* - * Initialize the PHY from the NVM on ICH platforms. This - * is needed due to an issue where the NVM configuration is - * not properly autoloaded after power transitions. - * Therefore, after each PHY reset, we will load the - * configuration data out of the NVM manually. 
- */ - if (hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) { - struct e1000_adapter *adapter = hw->adapter; - - /* Check if SW needs configure the PHY */ - if ((adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M_AMT) || - (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M)) - sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; - else - sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; - - data = er32(FEXTNVM); - if (!(data & sw_cfg_mask)) - return 0; - - /* Wait for basic configuration completes before proceeding */ - e1000_lan_init_done_ich8lan(hw); - - /* - * Make sure HW does not configure LCD from PHY - * extended configuration before SW configuration - */ - data = er32(EXTCNF_CTRL); - if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) - return 0; - - cnf_size = er32(EXTCNF_SIZE); - cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK; - cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT; - if (!cnf_size) - return 0; - - cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; - cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; - - /* Configure LCD from extended configuration region. */ - - /* cnf_base_addr is in DWORD */ - word_addr = (u16)(cnf_base_addr << 1); - - for (i = 0; i < cnf_size; i++) { - ret_val = e1000_read_nvm(hw, - (word_addr + i * 2), - 1, - ®_data); - if (ret_val) - return ret_val; - - ret_val = e1000_read_nvm(hw, - (word_addr + i * 2 + 1), - 1, - ®_addr); - if (ret_val) - return ret_val; - - /* Save off the PHY page for future writes. */ - if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) { - phy_page = reg_data; - continue; - } + /* Dummy read to clear the phy wakeup bit after lcd reset */ + if (hw->mac.type == e1000_pchlan) + e1e_rphy(hw, BM_WUC, ®); - reg_addr |= phy_page; + /* Configure the LCD with the extended configuration region in NVM */ + ret_val = e1000_sw_lcd_config_ich8lan(hw); + if (ret_val) + goto out; - ret_val = e1e_wphy(hw, (u32)reg_addr, reg_data); - if (ret_val) - return ret_val; - } - } + /* Configure the LCD with the OEM bits in NVM */ + if (hw->mac.type == e1000_pchlan) + ret_val = e1000_oem_bits_config_ich8lan(hw, true); +out: return 0; } @@ -1054,6 +1377,38 @@ static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw) } /** + * e1000_set_lplu_state_pchlan - Set Low Power Link Up state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU state according to the active flag. For PCH, if OEM write + * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set + * the phy speed. This function will manually set the LPLU bit and restart + * auto-neg as hw would do. D3 and D0 LPLU will call the same function + * since it configures the same bit. 
+ **/ +static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active) +{ + s32 ret_val = 0; + u16 oem_reg; + + ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg); + if (ret_val) + goto out; + + if (active) + oem_reg |= HV_OEM_BITS_LPLU; + else + oem_reg &= ~HV_OEM_BITS_LPLU; + + oem_reg |= HV_OEM_BITS_RESTART_AN; + ret_val = e1e_wphy(hw, HV_OEM_BITS, oem_reg); + +out: + return ret_val; +} + +/** * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state * @hw: pointer to the HW structure * @active: TRUE to enable LPLU, FALSE to disable @@ -1314,12 +1669,11 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || (words == 0)) { hw_dbg(hw, "nvm parameter(s) out of bounds\n"); - return -E1000_ERR_NVM; + ret_val = -E1000_ERR_NVM; + goto out; } - ret_val = e1000_acquire_swflag_ich8lan(hw); - if (ret_val) - goto out; + nvm->ops.acquire_nvm(hw); ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); if (ret_val) { @@ -1345,7 +1699,7 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, } } - e1000_release_swflag_ich8lan(hw); + nvm->ops.release_nvm(hw); out: if (ret_val) @@ -1603,11 +1957,15 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, return -E1000_ERR_NVM; } + nvm->ops.acquire_nvm(hw); + for (i = 0; i < words; i++) { dev_spec->shadow_ram[offset+i].modified = 1; dev_spec->shadow_ram[offset+i].value = data[i]; } + nvm->ops.release_nvm(hw); + return 0; } @@ -1637,9 +1995,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) if (nvm->type != e1000_nvm_flash_sw) goto out; - ret_val = e1000_acquire_swflag_ich8lan(hw); - if (ret_val) - goto out; + nvm->ops.acquire_nvm(hw); /* * We're writing to the opposite bank so if we're on bank 1, @@ -1657,7 +2013,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) old_bank_offset = 0; ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); if (ret_val) { - e1000_release_swflag_ich8lan(hw); + nvm->ops.release_nvm(hw); goto out; } } else { @@ -1665,7 +2021,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) new_bank_offset = 0; ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); if (ret_val) { - e1000_release_swflag_ich8lan(hw); + nvm->ops.release_nvm(hw); goto out; } } @@ -1723,7 +2079,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) if (ret_val) { /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */ hw_dbg(hw, "Flash commit failed.\n"); - e1000_release_swflag_ich8lan(hw); + nvm->ops.release_nvm(hw); goto out; } @@ -1736,7 +2092,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data); if (ret_val) { - e1000_release_swflag_ich8lan(hw); + nvm->ops.release_nvm(hw); goto out; } data &= 0xBFFF; @@ -1744,7 +2100,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) act_offset * 2 + 1, (u8)(data >> 8)); if (ret_val) { - e1000_release_swflag_ich8lan(hw); + nvm->ops.release_nvm(hw); goto out; } @@ -1757,7 +2113,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); if (ret_val) { - e1000_release_swflag_ich8lan(hw); + nvm->ops.release_nvm(hw); goto out; } @@ -1767,7 +2123,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct 
e1000_hw *hw) dev_spec->shadow_ram[i].value = 0xFFFF; } - e1000_release_swflag_ich8lan(hw); + nvm->ops.release_nvm(hw); /* * Reload the EEPROM, or else modifications will not appear @@ -1831,14 +2187,12 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) **/ void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw) { + struct e1000_nvm_info *nvm = &hw->nvm; union ich8_flash_protected_range pr0; union ich8_hws_flash_status hsfsts; u32 gfpreg; - s32 ret_val; - ret_val = e1000_acquire_swflag_ich8lan(hw); - if (ret_val) - return; + nvm->ops.acquire_nvm(hw); gfpreg = er32flash(ICH_FLASH_GFPREG); @@ -1859,7 +2213,7 @@ void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw) hsfsts.hsf_status.flockdn = true; ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval); - e1000_release_swflag_ich8lan(hw); + nvm->ops.release_nvm(hw); } /** @@ -2229,6 +2583,8 @@ static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw) **/ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) { + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u16 reg; u32 ctrl, icr, kab; s32 ret_val; @@ -2263,6 +2619,18 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) ew32(PBS, E1000_PBS_16K); } + if (hw->mac.type == e1000_pchlan) { + /* Save the NVM K1 bit setting*/ + ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, ®); + if (ret_val) + return ret_val; + + if (reg & E1000_NVM_K1_ENABLE) + dev_spec->nvm_k1_enabled = true; + else + dev_spec->nvm_k1_enabled = false; + } + ctrl = er32(CTRL); if (!e1000_check_reset_block(hw)) { @@ -2304,7 +2672,19 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) hw_dbg(hw, "Auto Read Done did not complete\n"); } } + /* Dummy read to clear the phy wakeup bit after lcd reset */ + if (hw->mac.type == e1000_pchlan) + e1e_rphy(hw, BM_WUC, ®); + ret_val = e1000_sw_lcd_config_ich8lan(hw); + if (ret_val) + goto out; + + if (hw->mac.type == e1000_pchlan) { + ret_val = e1000_oem_bits_config_ich8lan(hw, true); + if (ret_val) + goto out; + } /* * For PCH, this write will make sure that any noise * will be detected as a CRC error and be dropped rather than show up @@ -2323,6 +2703,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) if (hw->mac.type == e1000_pchlan) ret_val = e1000_hv_phy_workarounds_ich8lan(hw); +out: return ret_val; } @@ -2627,14 +3008,6 @@ static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed, if (ret_val) return ret_val; - if ((hw->mac.type == e1000_pchlan) && (*speed == SPEED_1000)) { - ret_val = e1000e_write_kmrn_reg(hw, - E1000_KMRNCTRLSTA_K1_CONFIG, - E1000_KMRNCTRLSTA_K1_DISABLE); - if (ret_val) - return ret_val; - } - if ((hw->mac.type == e1000_ich8lan) && (hw->phy.type == e1000_phy_igp_3) && (*speed == SPEED_1000)) { @@ -2843,9 +3216,8 @@ void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw) E1000_PHY_CTRL_GBE_DISABLE; ew32(PHY_CTRL, phy_ctrl); - /* Workaround SWFLAG unexpectedly set during S0->Sx */ if (hw->mac.type == e1000_pchlan) - udelay(500); + e1000_phy_hw_reset_ich8lan(hw); default: break; } @@ -3113,9 +3485,9 @@ static struct e1000_phy_operations ich8_phy_ops = { }; static struct e1000_nvm_operations ich8_nvm_ops = { - .acquire_nvm = e1000_acquire_swflag_ich8lan, + .acquire_nvm = e1000_acquire_nvm_ich8lan, .read_nvm = e1000_read_nvm_ich8lan, - .release_nvm = e1000_release_swflag_ich8lan, + .release_nvm = e1000_release_nvm_ich8lan, .update_nvm = e1000_update_nvm_checksum_ich8lan, .valid_led_default = e1000_valid_led_default_ich8lan, .validate_nvm = e1000_validate_nvm_checksum_ich8lan, diff --git 
a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c index 994401fd066..03175b3a2c9 100644 --- a/drivers/net/e1000e/phy.c +++ b/drivers/net/e1000e/phy.c @@ -95,13 +95,6 @@ static const u16 e1000_igp_2_cable_length_table[] = /* BM PHY Copper Specific Control 1 */ #define BM_CS_CTRL1 16 -/* BM PHY Copper Specific Status */ -#define BM_CS_STATUS 17 -#define BM_CS_STATUS_LINK_UP 0x0400 -#define BM_CS_STATUS_RESOLVED 0x0800 -#define BM_CS_STATUS_SPEED_MASK 0xC000 -#define BM_CS_STATUS_SPEED_1000 0x8000 - #define HV_MUX_DATA_CTRL PHY_REG(776, 16) #define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400 #define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004 @@ -164,16 +157,25 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw) * MDIC mode. No harm in trying again in this case since * the PHY ID is unknown at this point anyway */ + ret_val = phy->ops.acquire_phy(hw); + if (ret_val) + goto out; ret_val = e1000_set_mdio_slow_mode_hv(hw, true); if (ret_val) goto out; + phy->ops.release_phy(hw); retry_count++; } out: /* Revert to MDIO fast mode, if applicable */ - if (retry_count) + if (retry_count) { + ret_val = phy->ops.acquire_phy(hw); + if (ret_val) + return ret_val; ret_val = e1000_set_mdio_slow_mode_hv(hw, false); + phy->ops.release_phy(hw); + } return ret_val; } @@ -354,94 +356,173 @@ s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data) } /** - * e1000e_read_phy_reg_igp - Read igp PHY register + * __e1000e_read_phy_reg_igp - Read igp PHY register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data + * @locked: semaphore has already been acquired or not * * Acquires semaphore, if necessary, then reads the PHY register at offset - * and storing the retrieved information in data. Release any acquired + * and stores the retrieved information in data. Release any acquired * semaphores before exiting. **/ -s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) +static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data, + bool locked) { - s32 ret_val; + s32 ret_val = 0; - ret_val = hw->phy.ops.acquire_phy(hw); - if (ret_val) - return ret_val; + if (!locked) { + if (!(hw->phy.ops.acquire_phy)) + goto out; + + ret_val = hw->phy.ops.acquire_phy(hw); + if (ret_val) + goto out; + } if (offset > MAX_PHY_MULTI_PAGE_REG) { ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, (u16)offset); - if (ret_val) { - hw->phy.ops.release_phy(hw); - return ret_val; - } + if (ret_val) + goto release; } ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, - data); - - hw->phy.ops.release_phy(hw); + data); +release: + if (!locked) + hw->phy.ops.release_phy(hw); +out: return ret_val; } /** + * e1000e_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore then reads the PHY register at offset and stores the + * retrieved information in data. + * Release the acquired semaphore before exiting. + **/ +s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000e_read_phy_reg_igp(hw, offset, data, false); +} + +/** + * e1000e_read_phy_reg_igp_locked - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset and stores the retrieved information + * in data. Assumes semaphore already acquired. 
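[Editor's note] The phy.c refactor that follows folds each register accessor into one internal helper taking a `locked` flag, which only acquires and releases the semaphore when the caller does not already hold it, and layers thin `_igp`/`_igp_locked`-style wrappers on top. Below is a minimal stand-alone sketch of that wrapper pattern; the names are generic and a pthread mutex stands in for the driver's acquire_phy/release_phy ops.

```c
/*
 * Editorial sketch of the "locked/unlocked" accessor pattern used in the
 * phy.c hunks.  Generic names; a pthread mutex stands in for the
 * driver's acquire_phy/release_phy hooks.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t phy_sem = PTHREAD_MUTEX_INITIALIZER;
static uint16_t fake_reg[32];

static int __read_reg(uint32_t offset, uint16_t *data, int locked)
{
	int ret = 0;

	if (!locked)
		pthread_mutex_lock(&phy_sem);	/* acquire only if needed */

	if (offset >= 32) {
		ret = -1;			/* out of range */
		goto release;
	}
	*data = fake_reg[offset];		/* the actual register access */

release:
	if (!locked)
		pthread_mutex_unlock(&phy_sem);	/* release only what we took */
	return ret;
}

/* Public wrapper: takes and drops the semaphore around the access. */
static int read_reg(uint32_t offset, uint16_t *data)
{
	return __read_reg(offset, data, 0);
}

/*
 * "_locked" wrapper: assumes the caller already holds the semaphore,
 * so it can be used from inside a larger locked sequence.
 */
static int read_reg_locked(uint32_t offset, uint16_t *data)
{
	return __read_reg(offset, data, 1);
}

int main(void)
{
	uint16_t v = 0;

	read_reg(3, &v);		/* standalone access */

	pthread_mutex_lock(&phy_sem);	/* caller-held lock... */
	read_reg_locked(3, &v);		/* ...nested access without deadlock */
	pthread_mutex_unlock(&phy_sem);
	printf("reg 3 = 0x%04x\n", v);
	return 0;
}
```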
+ **/ +s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000e_read_phy_reg_igp(hw, offset, data, true); +} + +/** * e1000e_write_phy_reg_igp - Write igp PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset + * @locked: semaphore has already been acquired or not * * Acquires semaphore, if necessary, then writes the data to PHY register * at the offset. Release any acquired semaphores before exiting. **/ -s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) +static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, + bool locked) { - s32 ret_val; + s32 ret_val = 0; - ret_val = hw->phy.ops.acquire_phy(hw); - if (ret_val) - return ret_val; + if (!locked) { + if (!(hw->phy.ops.acquire_phy)) + goto out; + + ret_val = hw->phy.ops.acquire_phy(hw); + if (ret_val) + goto out; + } if (offset > MAX_PHY_MULTI_PAGE_REG) { ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, (u16)offset); - if (ret_val) { - hw->phy.ops.release_phy(hw); - return ret_val; - } + if (ret_val) + goto release; } ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); - hw->phy.ops.release_phy(hw); +release: + if (!locked) + hw->phy.ops.release_phy(hw); +out: return ret_val; } /** - * e1000e_read_kmrn_reg - Read kumeran register + * e1000e_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000e_write_phy_reg_igp(hw, offset, data, false); +} + +/** + * e1000e_write_phy_reg_igp_locked - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset. + * Assumes semaphore already acquired. + **/ +s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000e_write_phy_reg_igp(hw, offset, data, true); +} + +/** + * __e1000_read_kmrn_reg - Read kumeran register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data + * @locked: semaphore has already been acquired or not * * Acquires semaphore, if necessary. Then reads the PHY register at offset * using the kumeran interface. The information retrieved is stored in data. * Release any acquired semaphores before exiting. 
**/ -s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data) +static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, + bool locked) { u32 kmrnctrlsta; - s32 ret_val; + s32 ret_val = 0; - ret_val = hw->phy.ops.acquire_phy(hw); - if (ret_val) - return ret_val; + if (!locked) { + if (!(hw->phy.ops.acquire_phy)) + goto out; + + ret_val = hw->phy.ops.acquire_phy(hw); + if (ret_val) + goto out; + } kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; @@ -452,41 +533,111 @@ s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data) kmrnctrlsta = er32(KMRNCTRLSTA); *data = (u16)kmrnctrlsta; - hw->phy.ops.release_phy(hw); + if (!locked) + hw->phy.ops.release_phy(hw); +out: return ret_val; } /** - * e1000e_write_kmrn_reg - Write kumeran register + * e1000e_read_kmrn_reg - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore then reads the PHY register at offset using the + * kumeran interface. The information retrieved is stored in data. + * Release the acquired semaphore before exiting. + **/ +s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_kmrn_reg(hw, offset, data, false); +} + +/** + * e1000e_read_kmrn_reg_locked - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the kumeran interface. The + * information retrieved is stored in data. + * Assumes semaphore already acquired. + **/ +s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_kmrn_reg(hw, offset, data, true); +} + +/** + * __e1000_write_kmrn_reg - Write kumeran register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset + * @locked: semaphore has already been acquired or not * * Acquires semaphore, if necessary. Then write the data to PHY register * at the offset using the kumeran interface. Release any acquired semaphores * before exiting. **/ -s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data) +static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, + bool locked) { u32 kmrnctrlsta; - s32 ret_val; + s32 ret_val = 0; - ret_val = hw->phy.ops.acquire_phy(hw); - if (ret_val) - return ret_val; + if (!locked) { + if (!(hw->phy.ops.acquire_phy)) + goto out; + + ret_val = hw->phy.ops.acquire_phy(hw); + if (ret_val) + goto out; + } kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & E1000_KMRNCTRLSTA_OFFSET) | data; ew32(KMRNCTRLSTA, kmrnctrlsta); udelay(2); - hw->phy.ops.release_phy(hw); + if (!locked) + hw->phy.ops.release_phy(hw); + +out: return ret_val; } /** + * e1000e_write_kmrn_reg - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to the PHY register at the offset + * using the kumeran interface. Release the acquired semaphore before exiting. 
+ **/ +s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_kmrn_reg(hw, offset, data, false); +} + +/** + * e1000e_write_kmrn_reg_locked - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Write the data to PHY register at the offset using the kumeran interface. + * Assumes semaphore already acquired. + **/ +s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_kmrn_reg(hw, offset, data, true); +} + +/** * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link * @hw: pointer to the HW structure * @@ -2105,6 +2256,10 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) u32 page = offset >> IGP_PAGE_SHIFT; u32 page_shift = 0; + ret_val = hw->phy.ops.acquire_phy(hw); + if (ret_val) + return ret_val; + /* Page 800 works differently than the rest so it has its own func */ if (page == BM_WUC_PAGE) { ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, @@ -2112,10 +2267,6 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) goto out; } - ret_val = hw->phy.ops.acquire_phy(hw); - if (ret_val) - goto out; - hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); if (offset > MAX_PHY_MULTI_PAGE_REG) { @@ -2135,18 +2286,15 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) /* Page is shifted left, PHY expects (page x 32) */ ret_val = e1000e_write_phy_reg_mdic(hw, page_select, (page << page_shift)); - if (ret_val) { - hw->phy.ops.release_phy(hw); + if (ret_val) goto out; - } } ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); - hw->phy.ops.release_phy(hw); - out: + hw->phy.ops.release_phy(hw); return ret_val; } @@ -2167,6 +2315,10 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) u32 page = offset >> IGP_PAGE_SHIFT; u32 page_shift = 0; + ret_val = hw->phy.ops.acquire_phy(hw); + if (ret_val) + return ret_val; + /* Page 800 works differently than the rest so it has its own func */ if (page == BM_WUC_PAGE) { ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, @@ -2174,10 +2326,6 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) goto out; } - ret_val = hw->phy.ops.acquire_phy(hw); - if (ret_val) - goto out; - hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); if (offset > MAX_PHY_MULTI_PAGE_REG) { @@ -2197,17 +2345,14 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) /* Page is shifted left, PHY expects (page x 32) */ ret_val = e1000e_write_phy_reg_mdic(hw, page_select, (page << page_shift)); - if (ret_val) { - hw->phy.ops.release_phy(hw); + if (ret_val) goto out; - } } ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); - hw->phy.ops.release_phy(hw); - out: + hw->phy.ops.release_phy(hw); return ret_val; } @@ -2226,17 +2371,17 @@ s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data) s32 ret_val; u16 page = (u16)(offset >> IGP_PAGE_SHIFT); + ret_val = hw->phy.ops.acquire_phy(hw); + if (ret_val) + return ret_val; + /* Page 800 works differently than the rest so it has its own func */ if (page == BM_WUC_PAGE) { ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, true); - return ret_val; + goto out; } - ret_val = hw->phy.ops.acquire_phy(hw); - if (ret_val) - return ret_val; - hw->phy.addr = 1; if (offset > MAX_PHY_MULTI_PAGE_REG) { @@ -2245,16 +2390,14 @@ s32 
e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data) ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, page); - if (ret_val) { - hw->phy.ops.release_phy(hw); - return ret_val; - } + if (ret_val) + goto out; } ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); +out: hw->phy.ops.release_phy(hw); - return ret_val; } @@ -2272,17 +2415,17 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data) s32 ret_val; u16 page = (u16)(offset >> IGP_PAGE_SHIFT); + ret_val = hw->phy.ops.acquire_phy(hw); + if (ret_val) + return ret_val; + /* Page 800 works differently than the rest so it has its own func */ if (page == BM_WUC_PAGE) { ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, false); - return ret_val; + goto out; } - ret_val = hw->phy.ops.acquire_phy(hw); - if (ret_val) - return ret_val; - hw->phy.addr = 1; if (offset > MAX_PHY_MULTI_PAGE_REG) { @@ -2290,17 +2433,15 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data) ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, page); - if (ret_val) { - hw->phy.ops.release_phy(hw); - return ret_val; - } + if (ret_val) + goto out; } ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, data); +out: hw->phy.ops.release_phy(hw); - return ret_val; } @@ -2320,6 +2461,8 @@ s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data) * 3) Write the address using the address opcode (0x11) * 4) Read or write the data using the data opcode (0x12) * 5) Restore 769_17.2 to its original value + * + * Assumes semaphore already acquired. **/ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data, bool read) @@ -2327,20 +2470,12 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, s32 ret_val; u16 reg = BM_PHY_REG_NUM(offset); u16 phy_reg = 0; - u8 phy_acquired = 1; - /* Gig must be disabled for MDIO accesses to page 800 */ if ((hw->mac.type == e1000_pchlan) && (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE))) hw_dbg(hw, "Attempting to access page 800 while gig enabled\n"); - ret_val = hw->phy.ops.acquire_phy(hw); - if (ret_val) { - phy_acquired = 0; - goto out; - } - /* All operations in this function are phy address 1 */ hw->phy.addr = 1; @@ -2397,8 +2532,6 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); out: - if (phy_acquired == 1) - hw->phy.ops.release_phy(hw); return ret_val; } @@ -2439,52 +2572,63 @@ static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active) return 0; } +/** + * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode + * @hw: pointer to the HW structure + * @slow: true for slow mode, false for normal mode + * + * Assumes semaphore already acquired. 
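[Editor's note] A second recurring change in these hunks moves the semaphore acquire to the very top of each accessor and funnels every exit, including the page-800 special case, through a single `out:` label that releases it, instead of unlocking separately on each error branch. The snippet below is only an illustrative sketch of that single-exit cleanup shape with hypothetical names, not the driver's code.

```c
/*
 * Editorial sketch of the single-exit cleanup style the hunks above
 * convert the accessors to: lock once up front, branch freely, and let
 * one "out:" label undo it.  Names are illustrative only.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t phy_sem = PTHREAD_MUTEX_INITIALIZER;

static int select_page(unsigned page)
{
	/* pretend page selects above 7 fail, to exercise the error path */
	return page > 7 ? -1 : 0;
}

static int write_reg(unsigned page, unsigned reg, unsigned short data)
{
	int ret;

	pthread_mutex_lock(&phy_sem);		/* acquired exactly once */

	ret = select_page(page);
	if (ret)
		goto out;			/* no duplicated unlock here */

	printf("page %u reg %u <= 0x%04x\n", page, reg, data);
	ret = 0;
out:
	pthread_mutex_unlock(&phy_sem);		/* single release point */
	return ret;
}

int main(void)
{
	write_reg(2, 17, 0x2180);	/* succeeds */
	write_reg(9, 17, 0x2180);	/* fails, still releases the lock */
	return 0;
}
```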
+ **/ s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw, bool slow) { s32 ret_val = 0; u16 data = 0; - ret_val = hw->phy.ops.acquire_phy(hw); - if (ret_val) - return ret_val; - /* Set MDIO mode - page 769, register 16: 0x2580==slow, 0x2180==fast */ hw->phy.addr = 1; ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); - if (ret_val) { - hw->phy.ops.release_phy(hw); - return ret_val; - } + if (ret_val) + goto out; + ret_val = e1000e_write_phy_reg_mdic(hw, BM_CS_CTRL1, (0x2180 | (slow << 10))); + if (ret_val) + goto out; /* dummy read when reverting to fast mode - throw away result */ if (!slow) - e1000e_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data); - - hw->phy.ops.release_phy(hw); + ret_val = e1000e_read_phy_reg_mdic(hw, BM_CS_CTRL1, &data); +out: return ret_val; } /** - * e1000_read_phy_reg_hv - Read HV PHY register + * __e1000_read_phy_reg_hv - Read HV PHY register * @hw: pointer to the HW structure * @offset: register offset to be read * @data: pointer to the read data + * @locked: semaphore has already been acquired or not * * Acquires semaphore, if necessary, then reads the PHY register at offset - * and storing the retrieved information in data. Release any acquired + * and stores the retrieved information in data. Release any acquired * semaphore before exiting. **/ -s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data) +static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data, + bool locked) { s32 ret_val; u16 page = BM_PHY_REG_PAGE(offset); u16 reg = BM_PHY_REG_NUM(offset); bool in_slow_mode = false; + if (!locked) { + ret_val = hw->phy.ops.acquire_phy(hw); + if (ret_val) + return ret_val; + } + /* Workaround failure in MDIO access while cable is disconnected */ if ((hw->phy.type == e1000_phy_82577) && !(er32(STATUS) & E1000_STATUS_LU)) { @@ -2508,10 +2652,6 @@ s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data) goto out; } - ret_val = hw->phy.ops.acquire_phy(hw); - if (ret_val) - goto out; - hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); if (page == HV_INTC_FC_PAGE_START) @@ -2529,42 +2669,76 @@ s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data) ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, (page << IGP_PAGE_SHIFT)); - if (ret_val) { - hw->phy.ops.release_phy(hw); - goto out; - } hw->phy.addr = phy_addr; } } ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, data); - hw->phy.ops.release_phy(hw); - out: /* Revert to MDIO fast mode, if applicable */ if ((hw->phy.type == e1000_phy_82577) && in_slow_mode) ret_val = e1000_set_mdio_slow_mode_hv(hw, false); + if (!locked) + hw->phy.ops.release_phy(hw); + return ret_val; } /** - * e1000_write_phy_reg_hv - Write HV PHY register + * e1000_read_phy_reg_hv - Read HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore then reads the PHY register at offset and stores + * the retrieved information in data. Release the acquired semaphore + * before exiting. + **/ +s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_phy_reg_hv(hw, offset, data, false); +} + +/** + * e1000_read_phy_reg_hv_locked - Read HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset and stores the retrieved information + * in data. 
Assumes semaphore already acquired. + **/ +s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_phy_reg_hv(hw, offset, data, true); +} + +/** + * __e1000_write_phy_reg_hv - Write HV PHY register * @hw: pointer to the HW structure * @offset: register offset to write to * @data: data to write at register offset + * @locked: semaphore has already been acquired or not * * Acquires semaphore, if necessary, then writes the data to PHY register * at the offset. Release any acquired semaphores before exiting. **/ -s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data) +static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data, + bool locked) { s32 ret_val; u16 page = BM_PHY_REG_PAGE(offset); u16 reg = BM_PHY_REG_NUM(offset); bool in_slow_mode = false; + if (!locked) { + ret_val = hw->phy.ops.acquire_phy(hw); + if (ret_val) + return ret_val; + } + /* Workaround failure in MDIO access while cable is disconnected */ if ((hw->phy.type == e1000_phy_82577) && !(er32(STATUS) & E1000_STATUS_LU)) { @@ -2588,10 +2762,6 @@ s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data) goto out; } - ret_val = hw->phy.ops.acquire_phy(hw); - if (ret_val) - goto out; - hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); if (page == HV_INTC_FC_PAGE_START) @@ -2607,15 +2777,10 @@ s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data) ((MAX_PHY_REG_ADDRESS & reg) == 0) && (data & (1 << 11))) { u16 data2 = 0x7EFF; - hw->phy.ops.release_phy(hw); ret_val = e1000_access_phy_debug_regs_hv(hw, (1 << 6) | 0x3, &data2, false); if (ret_val) goto out; - - ret_val = hw->phy.ops.acquire_phy(hw); - if (ret_val) - goto out; } if (reg > MAX_PHY_MULTI_PAGE_REG) { @@ -2630,27 +2795,53 @@ s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data) ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, (page << IGP_PAGE_SHIFT)); - if (ret_val) { - hw->phy.ops.release_phy(hw); - goto out; - } hw->phy.addr = phy_addr; } } ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, data); - hw->phy.ops.release_phy(hw); out: /* Revert to MDIO fast mode, if applicable */ if ((hw->phy.type == e1000_phy_82577) && in_slow_mode) ret_val = e1000_set_mdio_slow_mode_hv(hw, false); + if (!locked) + hw->phy.ops.release_phy(hw); + return ret_val; } /** + * e1000_write_phy_reg_hv - Write HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to PHY register at the offset. + * Release the acquired semaphores before exiting. + **/ +s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_phy_reg_hv(hw, offset, data, false); +} + +/** + * e1000_write_phy_reg_hv_locked - Write HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset. Assumes semaphore + * already acquired. 
+ **/ +s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_phy_reg_hv(hw, offset, data, true); +} + +/** * e1000_get_phy_addr_for_hv_page - Get PHY adrress based on page * @page: page to be accessed **/ @@ -2671,10 +2862,9 @@ static u32 e1000_get_phy_addr_for_hv_page(u32 page) * @data: pointer to the data to be read or written * @read: determines if operation is read or written * - * Acquires semaphore, if necessary, then reads the PHY register at offset - * and storing the retreived information in data. Release any acquired - * semaphores before exiting. Note that the procedure to read these regs - * uses the address port and data port to read/write. + * Reads the PHY register at offset and stores the retreived information + * in data. Assumes semaphore already acquired. Note that the procedure + * to read these regs uses the address port and data port to read/write. **/ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, u16 *data, bool read) @@ -2682,20 +2872,12 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, s32 ret_val; u32 addr_reg = 0; u32 data_reg = 0; - u8 phy_acquired = 1; /* This takes care of the difference with desktop vs mobile phy */ addr_reg = (hw->phy.type == e1000_phy_82578) ? I82578_ADDR_REG : I82577_ADDR_REG; data_reg = addr_reg + 1; - ret_val = hw->phy.ops.acquire_phy(hw); - if (ret_val) { - hw_dbg(hw, "Could not acquire PHY\n"); - phy_acquired = 0; - goto out; - } - /* All operations in this function are phy address 2 */ hw->phy.addr = 2; @@ -2718,8 +2900,6 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, } out: - if (phy_acquired == 1) - hw->phy.ops.release_phy(hw); return ret_val; } diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c index 96f5b2a2d2c..f7d9ac8324c 100644 --- a/drivers/net/ethoc.c +++ b/drivers/net/ethoc.c @@ -223,24 +223,25 @@ struct ethoc_bd { u32 addr; }; -static u32 ethoc_read(struct ethoc *dev, loff_t offset) +static inline u32 ethoc_read(struct ethoc *dev, loff_t offset) { return ioread32(dev->iobase + offset); } -static void ethoc_write(struct ethoc *dev, loff_t offset, u32 data) +static inline void ethoc_write(struct ethoc *dev, loff_t offset, u32 data) { iowrite32(data, dev->iobase + offset); } -static void ethoc_read_bd(struct ethoc *dev, int index, struct ethoc_bd *bd) +static inline void ethoc_read_bd(struct ethoc *dev, int index, + struct ethoc_bd *bd) { loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd)); bd->stat = ethoc_read(dev, offset + 0); bd->addr = ethoc_read(dev, offset + 4); } -static void ethoc_write_bd(struct ethoc *dev, int index, +static inline void ethoc_write_bd(struct ethoc *dev, int index, const struct ethoc_bd *bd) { loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd)); @@ -248,33 +249,33 @@ static void ethoc_write_bd(struct ethoc *dev, int index, ethoc_write(dev, offset + 4, bd->addr); } -static void ethoc_enable_irq(struct ethoc *dev, u32 mask) +static inline void ethoc_enable_irq(struct ethoc *dev, u32 mask) { u32 imask = ethoc_read(dev, INT_MASK); imask |= mask; ethoc_write(dev, INT_MASK, imask); } -static void ethoc_disable_irq(struct ethoc *dev, u32 mask) +static inline void ethoc_disable_irq(struct ethoc *dev, u32 mask) { u32 imask = ethoc_read(dev, INT_MASK); imask &= ~mask; ethoc_write(dev, INT_MASK, imask); } -static void ethoc_ack_irq(struct ethoc *dev, u32 mask) +static inline void ethoc_ack_irq(struct ethoc *dev, u32 mask) { ethoc_write(dev, 
INT_SOURCE, mask); } -static void ethoc_enable_rx_and_tx(struct ethoc *dev) +static inline void ethoc_enable_rx_and_tx(struct ethoc *dev) { u32 mode = ethoc_read(dev, MODER); mode |= MODER_RXEN | MODER_TXEN; ethoc_write(dev, MODER, mode); } -static void ethoc_disable_rx_and_tx(struct ethoc *dev) +static inline void ethoc_disable_rx_and_tx(struct ethoc *dev) { u32 mode = ethoc_read(dev, MODER); mode &= ~(MODER_RXEN | MODER_TXEN); @@ -508,7 +509,7 @@ static irqreturn_t ethoc_interrupt(int irq, void *dev_id) return IRQ_NONE; } - ethoc_ack_irq(priv, INT_MASK_ALL); + ethoc_ack_irq(priv, pending); if (pending & INT_MASK_BUSY) { dev_err(&dev->dev, "packet dropped\n"); @@ -664,7 +665,8 @@ static int ethoc_open(struct net_device *dev) return ret; /* calculate the number of TX/RX buffers, maximum 128 supported */ - num_bd = min(128, (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ); + num_bd = min_t(unsigned int, + 128, (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ); priv->num_tx = max(min_tx, num_bd / 4); priv->num_rx = num_bd - priv->num_tx; ethoc_write(priv, TX_BD_NUM, priv->num_tx); diff --git a/drivers/net/fec.c b/drivers/net/fec.c index 29234380e6c..16a1d58419d 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c @@ -1654,7 +1654,7 @@ static const struct net_device_ops fec_netdev_ops = { * * index is only used in legacy code */ -int __init fec_enet_init(struct net_device *dev, int index) +static int fec_enet_init(struct net_device *dev, int index) { struct fec_enet_private *fep = netdev_priv(dev); struct bufdesc *cbd_base; diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c index c40113f5896..66dace6d324 100644 --- a/drivers/net/fec_mpc52xx.c +++ b/drivers/net/fec_mpc52xx.c @@ -759,12 +759,6 @@ static void mpc52xx_fec_reset(struct net_device *dev) mpc52xx_fec_hw_init(dev); - if (priv->phydev) { - phy_stop(priv->phydev); - phy_write(priv->phydev, MII_BMCR, BMCR_RESET); - phy_start(priv->phydev); - } - bcom_fec_rx_reset(priv->rx_dmatsk); bcom_fec_tx_reset(priv->tx_dmatsk); diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c index 31e6d62b785..ee0f3c6d3f8 100644 --- a/drivers/net/fec_mpc52xx_phy.c +++ b/drivers/net/fec_mpc52xx_phy.c @@ -155,6 +155,7 @@ static struct of_device_id mpc52xx_fec_mdio_match[] = { { .compatible = "mpc5200b-fec-phy", }, {} }; +MODULE_DEVICE_TABLE(of, mpc52xx_fec_mdio_match); struct of_platform_driver mpc52xx_fec_mdio_driver = { .name = "mpc5200b-fec-phy", diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c index 2bc2d2b2064..ec2f5034457 100644 --- a/drivers/net/fs_enet/fs_enet-main.c +++ b/drivers/net/fs_enet/fs_enet-main.c @@ -1110,6 +1110,7 @@ static struct of_device_id fs_enet_match[] = { #endif {} }; +MODULE_DEVICE_TABLE(of, fs_enet_match); static struct of_platform_driver fs_enet_driver = { .name = "fs_enet", diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c index 93b481b0e3c..24ff9f43a62 100644 --- a/drivers/net/fs_enet/mii-bitbang.c +++ b/drivers/net/fs_enet/mii-bitbang.c @@ -221,6 +221,7 @@ static struct of_device_id fs_enet_mdio_bb_match[] = { }, {}, }; +MODULE_DEVICE_TABLE(of, fs_enet_mdio_bb_match); static struct of_platform_driver fs_enet_bb_mdio_driver = { .name = "fsl-bb-mdio", diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c index a2d69c1cd07..96eba4280c5 100644 --- a/drivers/net/fs_enet/mii-fec.c +++ b/drivers/net/fs_enet/mii-fec.c @@ -219,6 +219,7 @@ static struct of_device_id fs_enet_mdio_fec_match[] = { #endif {}, 
}; +MODULE_DEVICE_TABLE(of, fs_enet_mdio_fec_match); static struct of_platform_driver fs_enet_fec_mdio_driver = { .name = "fsl-fec-mdio", diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c index d167090248e..6ac46486697 100644 --- a/drivers/net/fsl_pq_mdio.c +++ b/drivers/net/fsl_pq_mdio.c @@ -407,6 +407,7 @@ static struct of_device_id fsl_pq_mdio_match[] = { }, {}, }; +MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match); static struct of_platform_driver fsl_pq_mdio_driver = { .name = "fsl-pq_mdio", diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 1e5289ffef6..5bf31f1509c 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c @@ -2325,9 +2325,6 @@ static irqreturn_t gfar_error(int irq, void *dev_id) return IRQ_HANDLED; } -/* work with hotplug and coldplug */ -MODULE_ALIAS("platform:fsl-gianfar"); - static struct of_device_id gfar_match[] = { { @@ -2336,6 +2333,7 @@ static struct of_device_id gfar_match[] = }, {}, }; +MODULE_DEVICE_TABLE(of, gfar_match); /* Structure for a device driver */ static struct of_platform_driver gfar_driver = { diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c index 89c82c5e63e..3fae8755979 100644 --- a/drivers/net/ibm_newemac/core.c +++ b/drivers/net/ibm_newemac/core.c @@ -24,6 +24,7 @@ * */ +#include <linux/module.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/errno.h> @@ -443,7 +444,7 @@ static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_s ret |= EMAC_MR1_TFS_2K; break; default: - printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n", + printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n", dev->ndev->name, tx_size); } @@ -470,6 +471,9 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_ DBG2(dev, "__emac4_calc_base_mr1" NL); switch(tx_size) { + case 16384: + ret |= EMAC4_MR1_TFS_16K; + break; case 4096: ret |= EMAC4_MR1_TFS_4K; break; @@ -477,7 +481,7 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_ ret |= EMAC4_MR1_TFS_2K; break; default: - printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n", + printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n", dev->ndev->name, tx_size); } @@ -2985,6 +2989,7 @@ static struct of_device_id emac_match[] = }, {}, }; +MODULE_DEVICE_TABLE(of, emac_match); static struct of_platform_driver emac_driver = { .name = "emac", diff --git a/drivers/net/ibm_newemac/emac.h b/drivers/net/ibm_newemac/emac.h index 0afc2cf5c52..d34adf99fc6 100644 --- a/drivers/net/ibm_newemac/emac.h +++ b/drivers/net/ibm_newemac/emac.h @@ -153,6 +153,7 @@ struct emac_regs { #define EMAC4_MR1_RFS_16K 0x00280000 #define EMAC4_MR1_TFS_2K 0x00020000 #define EMAC4_MR1_TFS_4K 0x00030000 +#define EMAC4_MR1_TFS_16K 0x00050000 #define EMAC4_MR1_TR 0x00008000 #define EMAC4_MR1_MWSW_001 0x00001000 #define EMAC4_MR1_JPSM 0x00000800 diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 801f088c134..030913f8bd2 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -98,12 +98,13 @@ static void ri_tasklet(unsigned long dev) stats->tx_packets++; stats->tx_bytes +=skb->len; - skb->dev = __dev_get_by_index(&init_net, skb->iif); + skb->dev = dev_get_by_index(&init_net, skb->iif); if (!skb->dev) { dev_kfree_skb(skb); stats->tx_dropped++; break; } + dev_put(skb->dev); skb->iif = _dev->ifindex; if (from & AT_EGRESS) { diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c index deaea8fa103..b243ed3b0c3 100644 --- a/drivers/net/igb/igb_ethtool.c +++ b/drivers/net/igb/igb_ethtool.c @@ -732,7 
+732,7 @@ static int igb_set_ringparam(struct net_device *netdev, { struct igb_adapter *adapter = netdev_priv(netdev); struct igb_ring *temp_ring; - int i, err; + int i, err = 0; u32 new_rx_count, new_tx_count; if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) @@ -752,18 +752,30 @@ static int igb_set_ringparam(struct net_device *netdev, return 0; } + while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) + msleep(1); + + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i].count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i].count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + if (adapter->num_tx_queues > adapter->num_rx_queues) temp_ring = vmalloc(adapter->num_tx_queues * sizeof(struct igb_ring)); else temp_ring = vmalloc(adapter->num_rx_queues * sizeof(struct igb_ring)); - if (!temp_ring) - return -ENOMEM; - while (test_and_set_bit(__IGB_RESETTING, &adapter->state)) - msleep(1); + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } - if (netif_running(adapter->netdev)) - igb_down(adapter); + igb_down(adapter); /* * We can't just free everything and then setup again, @@ -820,14 +832,11 @@ static int igb_set_ringparam(struct net_device *netdev, adapter->rx_ring_count = new_rx_count; } - - err = 0; err_setup: - if (netif_running(adapter->netdev)) - igb_up(adapter); - - clear_bit(__IGB_RESETTING, &adapter->state); + igb_up(adapter); vfree(temp_ring); +clear_reset: + clear_bit(__IGB_RESETTING, &adapter->state); return err; } diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c index ee17a097d1c..c68265bd0d1 100644 --- a/drivers/net/igbvf/ethtool.c +++ b/drivers/net/igbvf/ethtool.c @@ -279,7 +279,7 @@ static int igbvf_set_ringparam(struct net_device *netdev, { struct igbvf_adapter *adapter = netdev_priv(netdev); struct igbvf_ring *temp_ring; - int err; + int err = 0; u32 new_rx_count, new_tx_count; if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) @@ -299,15 +299,22 @@ static int igbvf_set_ringparam(struct net_device *netdev, return 0; } - temp_ring = vmalloc(sizeof(struct igbvf_ring)); - if (!temp_ring) - return -ENOMEM; - while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) msleep(1); - if (netif_running(adapter->netdev)) - igbvf_down(adapter); + if (!netif_running(adapter->netdev)) { + adapter->tx_ring->count = new_tx_count; + adapter->rx_ring->count = new_rx_count; + goto clear_reset; + } + + temp_ring = vmalloc(sizeof(struct igbvf_ring)); + if (!temp_ring) { + err = -ENOMEM; + goto clear_reset; + } + + igbvf_down(adapter); /* * We can't just free everything and then setup again, @@ -339,14 +346,11 @@ static int igbvf_set_ringparam(struct net_device *netdev, memcpy(adapter->rx_ring, temp_ring,sizeof(struct igbvf_ring)); } - - err = 0; err_setup: - if (netif_running(adapter->netdev)) - igbvf_up(adapter); - - clear_bit(__IGBVF_RESETTING, &adapter->state); + igbvf_up(adapter); vfree(temp_ring); +clear_reset: + clear_bit(__IGBVF_RESETTING, &adapter->state); return err; } diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c index 38bf7cf2256..c412e802617 100644 --- a/drivers/net/irda/sa1100_ir.c +++ b/drivers/net/irda/sa1100_ir.c @@ -232,8 +232,11 @@ static int sa1100_irda_startup(struct sa1100_irda *si) /* * Ensure that the ports for this device are setup correctly. 
*/ - if (si->pdata->startup) - si->pdata->startup(si->dev); + if (si->pdata->startup) { + ret = si->pdata->startup(si->dev); + if (ret) + return ret; + } /* * Configure PPC for IRDA - we want to drive TXD2 low. diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c index fa314cb005a..856c18c207f 100644 --- a/drivers/net/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ixgbe/ixgbe_ethtool.c @@ -798,7 +798,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev, { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_ring *temp_tx_ring, *temp_rx_ring; - int i, err; + int i, err = 0; u32 new_rx_count, new_tx_count; bool need_update = false; @@ -822,6 +822,16 @@ static int ixgbe_set_ringparam(struct net_device *netdev, while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) msleep(1); + if (!netif_running(adapter->netdev)) { + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i].count = new_tx_count; + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i].count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto err_setup; + } + temp_tx_ring = kcalloc(adapter->num_tx_queues, sizeof(struct ixgbe_ring), GFP_KERNEL); if (!temp_tx_ring) { @@ -879,8 +889,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev, /* if rings need to be updated, here's the place to do it in one shot */ if (need_update) { - if (netif_running(netdev)) - ixgbe_down(adapter); + ixgbe_down(adapter); /* tx */ if (new_tx_count != adapter->tx_ring_count) { @@ -897,13 +906,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev, temp_rx_ring = NULL; adapter->rx_ring_count = new_rx_count; } - } - - /* success! */ - err = 0; - if (netif_running(netdev)) ixgbe_up(adapter); - + } err_setup: clear_bit(__IXGBE_RESETTING, &adapter->state); return err; diff --git a/drivers/net/ixp2000/enp2611.c b/drivers/net/ixp2000/enp2611.c index b02a981c87a..34a6cfd1793 100644 --- a/drivers/net/ixp2000/enp2611.c +++ b/drivers/net/ixp2000/enp2611.c @@ -119,24 +119,9 @@ static struct ixp2400_msf_parameters enp2611_msf_parameters = } }; -struct enp2611_ixpdev_priv -{ - struct ixpdev_priv ixpdev_priv; - struct net_device_stats stats; -}; - static struct net_device *nds[3]; static struct timer_list link_check_timer; -static struct net_device_stats *enp2611_get_stats(struct net_device *dev) -{ - struct enp2611_ixpdev_priv *ip = netdev_priv(dev); - - pm3386_get_stats(ip->ixpdev_priv.channel, &(ip->stats)); - - return &(ip->stats); -} - /* @@@ Poll the SFP moddef0 line too. */ /* @@@ Try to use the pm3386 DOOL interrupt as well. 
*/ static void enp2611_check_link_status(unsigned long __dummy) @@ -203,14 +188,13 @@ static int __init enp2611_init_module(void) ports = pm3386_port_count(); for (i = 0; i < ports; i++) { - nds[i] = ixpdev_alloc(i, sizeof(struct enp2611_ixpdev_priv)); + nds[i] = ixpdev_alloc(i, sizeof(struct ixpdev_priv)); if (nds[i] == NULL) { while (--i >= 0) free_netdev(nds[i]); return -ENOMEM; } - nds[i]->get_stats = enp2611_get_stats; pm3386_init_port(i); pm3386_get_mac(i, nds[i]->dev_addr); } diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c index 127243461a5..9aee0cc922c 100644 --- a/drivers/net/ixp2000/ixpdev.c +++ b/drivers/net/ixp2000/ixpdev.c @@ -21,6 +21,7 @@ #include "ixp2400_tx.ucode" #include "ixpdev_priv.h" #include "ixpdev.h" +#include "pm3386.h" #define DRV_MODULE_VERSION "0.2" @@ -271,6 +272,15 @@ static int ixpdev_close(struct net_device *dev) return 0; } +static struct net_device_stats *ixpdev_get_stats(struct net_device *dev) +{ + struct ixpdev_priv *ip = netdev_priv(dev); + + pm3386_get_stats(ip->channel, &(dev->stats)); + + return &(dev->stats); +} + static const struct net_device_ops ixpdev_netdev_ops = { .ndo_open = ixpdev_open, .ndo_stop = ixpdev_close, @@ -278,6 +288,7 @@ static const struct net_device_ops ixpdev_netdev_ops = { .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, + .ndo_get_stats = ixpdev_get_stats, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ixpdev_poll_controller, #endif diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c index 23783586435..a23f739d222 100644 --- a/drivers/net/ks8851.c +++ b/drivers/net/ks8851.c @@ -171,6 +171,36 @@ static void ks8851_wrreg16(struct ks8851_net *ks, unsigned reg, unsigned val) } /** + * ks8851_wrreg8 - write 8bit register value to chip + * @ks: The chip state + * @reg: The register address + * @val: The value to write + * + * Issue a write to put the value @val into the register specified in @reg. + */ +static void ks8851_wrreg8(struct ks8851_net *ks, unsigned reg, unsigned val) +{ + struct spi_transfer *xfer = &ks->spi_xfer1; + struct spi_message *msg = &ks->spi_msg1; + __le16 txb[2]; + int ret; + int bit; + + bit = 1 << (reg & 3); + + txb[0] = cpu_to_le16(MK_OP(bit, reg) | KS_SPIOP_WR); + txb[1] = val; + + xfer->tx_buf = txb; + xfer->rx_buf = NULL; + xfer->len = 3; + + ret = spi_sync(ks->spidev, msg); + if (ret < 0) + ks_err(ks, "spi_sync() failed\n"); +} + +/** * ks8851_rx_1msg - select whether to use one or two messages for spi read * @ks: The device structure * @@ -322,13 +352,12 @@ static void ks8851_soft_reset(struct ks8851_net *ks, unsigned op) static int ks8851_write_mac_addr(struct net_device *dev) { struct ks8851_net *ks = netdev_priv(dev); - u16 *mcp = (u16 *)dev->dev_addr; + int i; mutex_lock(&ks->lock); - ks8851_wrreg16(ks, KS_MARL, mcp[0]); - ks8851_wrreg16(ks, KS_MARM, mcp[1]); - ks8851_wrreg16(ks, KS_MARH, mcp[2]); + for (i = 0; i < ETH_ALEN; i++) + ks8851_wrreg8(ks, KS_MAR(i), dev->dev_addr[i]); mutex_unlock(&ks->lock); @@ -951,7 +980,7 @@ static void ks8851_set_rx_mode(struct net_device *dev) mcptr = mcptr->next; } - rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXAE | RXCR1_RXPAFMA; + rxctrl.rxcr1 = RXCR1_RXME | RXCR1_RXPAFMA; } else { /* just accept broadcast / unicast */ rxctrl.rxcr1 = RXCR1_RXPAFMA; @@ -1239,6 +1268,9 @@ static int __devinit ks8851_probe(struct spi_device *spi) ndev->netdev_ops = &ks8851_netdev_ops; ndev->irq = spi->irq; + /* issue a global soft reset to reset the device. 
*/ + ks8851_soft_reset(ks, GRR_GSR); + /* simple check for a valid chip being connected to the bus */ if ((ks8851_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) { diff --git a/drivers/net/ks8851.h b/drivers/net/ks8851.h index 85abe147afb..f52c312cc35 100644 --- a/drivers/net/ks8851.h +++ b/drivers/net/ks8851.h @@ -16,6 +16,7 @@ #define CCR_32PIN (1 << 0) /* MAC address registers */ +#define KS_MAR(_m) 0x15 - (_m) #define KS_MARL 0x10 #define KS_MARM 0x12 #define KS_MARH 0x14 diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c index 5dd7225b178..291a505fd4f 100644 --- a/drivers/net/mlx4/main.c +++ b/drivers/net/mlx4/main.c @@ -1282,6 +1282,7 @@ static struct pci_device_id mlx4_pci_table[] = { { PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */ { PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */ { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/ + { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */ { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */ { 0, } }; diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index 6930c87f362..f3624517cb0 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c @@ -75,7 +75,7 @@ #include "myri10ge_mcp.h" #include "myri10ge_mcp_gen_header.h" -#define MYRI10GE_VERSION_STR "1.5.0-1.432" +#define MYRI10GE_VERSION_STR "1.5.1-1.451" MODULE_DESCRIPTION("Myricom 10G driver (10GbE)"); MODULE_AUTHOR("Maintainer: help@myri.com"); @@ -1624,10 +1624,21 @@ myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) return 0; } } - if (*ptr == 'R' || *ptr == 'Q') { - /* We've found either an XFP or quad ribbon fiber */ + if (*ptr == '2') + ptr++; + if (*ptr == 'R' || *ptr == 'Q' || *ptr == 'S') { + /* We've found either an XFP, quad ribbon fiber, or SFP+ */ cmd->port = PORT_FIBRE; + cmd->supported |= SUPPORTED_FIBRE; + cmd->advertising |= ADVERTISED_FIBRE; + } else { + cmd->port = PORT_OTHER; } + if (*ptr == 'R' || *ptr == 'S') + cmd->transceiver = XCVR_EXTERNAL; + else + cmd->transceiver = XCVR_INTERNAL; + return 0; } diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h index 7a7177421d7..1c46da63212 100644 --- a/drivers/net/netxen/netxen_nic_hdr.h +++ b/drivers/net/netxen/netxen_nic_hdr.h @@ -419,6 +419,7 @@ enum { #define NETXEN_CRB_ROMUSB \ NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_ROMUSB) #define NETXEN_CRB_I2Q NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2Q) +#define NETXEN_CRB_I2C0 NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_I2C0) #define NETXEN_CRB_SMB NETXEN_PCI_CRB_WINDOW(NETXEN_HW_PX_MAP_CRB_SMB) #define NETXEN_CRB_MAX NETXEN_PCI_CRB_WINDOW(64) diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c index 32314000dfc..3185a98b091 100644 --- a/drivers/net/netxen/netxen_nic_hw.c +++ b/drivers/net/netxen/netxen_nic_hw.c @@ -1901,22 +1901,16 @@ netxen_setup_hwops(struct netxen_adapter *adapter) int netxen_nic_get_board_info(struct netxen_adapter *adapter) { - int offset, board_type, magic, header_version; + int offset, board_type, magic; struct pci_dev *pdev = adapter->pdev; offset = NX_FW_MAGIC_OFFSET; if (netxen_rom_fast_read(adapter, offset, &magic)) return -EIO; - offset = NX_HDR_VERSION_OFFSET; - if (netxen_rom_fast_read(adapter, offset, &header_version)) - return -EIO; - - if (magic != NETXEN_BDINFO_MAGIC || - header_version != NETXEN_BDINFO_VERSION) { - dev_err(&pdev->dev, - 
"invalid board config, magic=%08x, version=%08x\n", - magic, header_version); + if (magic != NETXEN_BDINFO_MAGIC) { + dev_err(&pdev->dev, "invalid board config, magic=%08x\n", + magic); return -EIO; } diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index 91c2bc61c8e..e40b914d6fa 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c @@ -531,6 +531,8 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose) continue; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { + if (off == (NETXEN_CRB_I2C0 + 0x1c)) + continue; /* do not reset PCI */ if (off == (ROMUSB_GLB + 0xbc)) continue; @@ -553,12 +555,6 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose) continue; } - if (off == NETXEN_ADDR_ERROR) { - printk(KERN_ERR "%s: Err: Unknown addr: 0x%08x\n", - netxen_nic_driver_name, buf[i].addr); - continue; - } - init_delay = 1; /* After writing this register, HW needs time for CRB */ /* to quiet down (else crb_window returns 0xffffffff) */ diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c index 9b9eab10770..0b4a56a8c8d 100644 --- a/drivers/net/netxen/netxen_nic_main.c +++ b/drivers/net/netxen/netxen_nic_main.c @@ -595,7 +595,8 @@ netxen_setup_pci_map(struct netxen_adapter *adapter) void __iomem *mem_ptr2 = NULL; void __iomem *db_ptr = NULL; - unsigned long mem_base, mem_len, db_base, db_len = 0, pci_len0 = 0; + resource_size_t mem_base, db_base; + unsigned long mem_len, db_len = 0, pci_len0 = 0; struct pci_dev *pdev = adapter->pdev; int pci_func = adapter->ahw.pci_func; @@ -1918,6 +1919,7 @@ static void netxen_tx_timeout_task(struct work_struct *work) request_reset: adapter->need_fw_reset = 1; + clear_bit(__NX_RESETTING, &adapter->state); } struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev) diff --git a/drivers/net/niu.c b/drivers/net/niu.c index f9364d0678f..d6c7ac68f6e 100644 --- a/drivers/net/niu.c +++ b/drivers/net/niu.c @@ -3545,7 +3545,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np, rp->rcr_index = index; skb_reserve(skb, NET_IP_ALIGN); - __pskb_pull_tail(skb, min(len, NIU_RXPULL_MAX)); + __pskb_pull_tail(skb, min(len, VLAN_ETH_HLEN)); rp->rx_packets++; rp->rx_bytes += skb->len; diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c index ee8ad3e180d..b58965a2b3a 100644 --- a/drivers/net/pcmcia/3c574_cs.c +++ b/drivers/net/pcmcia/3c574_cs.c @@ -251,6 +251,7 @@ static void el3_tx_timeout(struct net_device *dev); static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static const struct ethtool_ops netdev_ethtool_ops; static void set_rx_mode(struct net_device *dev); +static void set_multicast_list(struct net_device *dev); static void tc574_detach(struct pcmcia_device *p_dev); @@ -266,7 +267,7 @@ static const struct net_device_ops el3_netdev_ops = { .ndo_tx_timeout = el3_tx_timeout, .ndo_get_stats = el3_get_stats, .ndo_do_ioctl = el3_ioctl, - .ndo_set_multicast_list = set_rx_mode, + .ndo_set_multicast_list = set_multicast_list, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, @@ -1161,6 +1162,16 @@ static void set_rx_mode(struct net_device *dev) outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD); } +static void set_multicast_list(struct net_device *dev) +{ + struct el3_private *lp = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&lp->window_lock, flags); + 
set_rx_mode(dev); + spin_unlock_irqrestore(&lp->window_lock, flags); +} + static int el3_close(struct net_device *dev) { unsigned int ioaddr = dev->base_addr; diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c index bd3447f0490..94c9ad2746b 100644 --- a/drivers/net/pcmcia/pcnet_cs.c +++ b/drivers/net/pcmcia/pcnet_cs.c @@ -1760,7 +1760,7 @@ static struct pcmcia_device_id pcnet_ids[] = { PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"), PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "PE520.cis"), PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"), - PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "PE-200.cis"), + PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "cis/PE-200.cis"), PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "cis/tamarack.cis"), PCMCIA_DEVICE_PROD_ID12("Ethernet", "CF Size PC Card", 0x00b2e941, 0x43ac239b), PCMCIA_DEVICE_PROD_ID123("Fast Ethernet", "CF Size PC Card", "1.0", diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 250e10f2c35..8659d341e76 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -238,6 +238,7 @@ static struct of_device_id mdio_ofgpio_match[] = { }, {}, }; +MODULE_DEVICE_TABLE(of, mdio_ofgpio_match); static struct of_platform_driver mdio_ofgpio_driver = { .name = "mdio-gpio", diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c index 7cbf6f9b51d..2559991eea6 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c @@ -111,9 +111,6 @@ struct pppoe_net { rwlock_t hash_lock; }; -/* to eliminate a race btw pppoe_flush_dev and pppoe_release */ -static DEFINE_SPINLOCK(flush_lock); - /* * PPPoE could be in the following stages: * 1) Discovery stage (to obtain remote MAC and Session ID) @@ -303,45 +300,48 @@ static void pppoe_flush_dev(struct net_device *dev) write_lock_bh(&pn->hash_lock); for (i = 0; i < PPPOE_HASH_SIZE; i++) { struct pppox_sock *po = pn->hash_table[i]; + struct sock *sk; - while (po != NULL) { - struct sock *sk; - if (po->pppoe_dev != dev) { + while (po) { + while (po && po->pppoe_dev != dev) { po = po->next; - continue; } + + if (!po) + break; + sk = sk_pppox(po); - spin_lock(&flush_lock); - po->pppoe_dev = NULL; - spin_unlock(&flush_lock); - dev_put(dev); /* We always grab the socket lock, followed by the - * hash_lock, in that order. Since we should - * hold the sock lock while doing any unbinding, - * we need to release the lock we're holding. - * Hold a reference to the sock so it doesn't disappear - * as we're jumping between locks. + * hash_lock, in that order. Since we should hold the + * sock lock while doing any unbinding, we need to + * release the lock we're holding. Hold a reference to + * the sock so it doesn't disappear as we're jumping + * between locks. */ sock_hold(sk); - write_unlock_bh(&pn->hash_lock); lock_sock(sk); - if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { + if (po->pppoe_dev == dev + && sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { pppox_unbind_sock(sk); sk->sk_state = PPPOX_ZOMBIE; sk->sk_state_change(sk); + po->pppoe_dev = NULL; + dev_put(dev); } release_sock(sk); sock_put(sk); - /* Restart scan at the beginning of this hash chain. - * While the lock was dropped the chain contents may - * have changed. + /* Restart the process from the start of the current + * hash chain. We dropped locks so the world may have + * change from underneath us. 
*/ + + BUG_ON(pppoe_pernet(dev_net(dev)) == NULL); write_lock_bh(&pn->hash_lock); po = pn->hash_table[i]; } @@ -388,11 +388,16 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb) struct pppox_sock *po = pppox_sk(sk); struct pppox_sock *relay_po; + /* Backlog receive. Semantics of backlog rcv preclude any code from + * executing in lock_sock()/release_sock() bounds; meaning sk->sk_state + * can't change. + */ + if (sk->sk_state & PPPOX_BOUND) { ppp_input(&po->chan, skb); } else if (sk->sk_state & PPPOX_RELAY) { - relay_po = get_item_by_addr(dev_net(po->pppoe_dev), - &po->pppoe_relay); + relay_po = get_item_by_addr(sock_net(sk), + &po->pppoe_relay); if (relay_po == NULL) goto abort_kfree; @@ -447,6 +452,10 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev, goto drop; pn = pppoe_pernet(dev_net(dev)); + + /* Note that get_item does a sock_hold(), so sk_pppox(po) + * is known to be safe. + */ po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex); if (!po) goto drop; @@ -561,6 +570,7 @@ static int pppoe_release(struct socket *sock) struct sock *sk = sock->sk; struct pppox_sock *po; struct pppoe_net *pn; + struct net *net = NULL; if (!sk) return 0; @@ -571,44 +581,28 @@ static int pppoe_release(struct socket *sock) return -EBADF; } + po = pppox_sk(sk); + + if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { + dev_put(po->pppoe_dev); + po->pppoe_dev = NULL; + } + pppox_unbind_sock(sk); /* Signal the death of the socket. */ sk->sk_state = PPPOX_DEAD; - /* - * pppoe_flush_dev could lead to a race with - * this routine so we use flush_lock to eliminate - * such a case (we only need per-net specific data) - */ - spin_lock(&flush_lock); - po = pppox_sk(sk); - if (!po->pppoe_dev) { - spin_unlock(&flush_lock); - goto out; - } - pn = pppoe_pernet(dev_net(po->pppoe_dev)); - spin_unlock(&flush_lock); + net = sock_net(sk); + pn = pppoe_pernet(net); /* * protect "po" from concurrent updates * on pppoe_flush_dev */ - write_lock_bh(&pn->hash_lock); + delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote, + po->pppoe_ifindex); - po = pppox_sk(sk); - if (stage_session(po->pppoe_pa.sid)) - __delete_item(pn, po->pppoe_pa.sid, po->pppoe_pa.remote, - po->pppoe_ifindex); - - if (po->pppoe_dev) { - dev_put(po->pppoe_dev); - po->pppoe_dev = NULL; - } - - write_unlock_bh(&pn->hash_lock); - -out: sock_orphan(sk); sock->sk = NULL; @@ -625,8 +619,9 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, struct sock *sk = sock->sk; struct sockaddr_pppox *sp = (struct sockaddr_pppox *)uservaddr; struct pppox_sock *po = pppox_sk(sk); - struct net_device *dev; + struct net_device *dev = NULL; struct pppoe_net *pn; + struct net *net = NULL; int error; lock_sock(sk); @@ -652,12 +647,14 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, /* Delete the old binding */ if (stage_session(po->pppoe_pa.sid)) { pppox_unbind_sock(sk); + pn = pppoe_pernet(sock_net(sk)); + delete_item(pn, po->pppoe_pa.sid, + po->pppoe_pa.remote, po->pppoe_ifindex); if (po->pppoe_dev) { - pn = pppoe_pernet(dev_net(po->pppoe_dev)); - delete_item(pn, po->pppoe_pa.sid, - po->pppoe_pa.remote, po->pppoe_ifindex); dev_put(po->pppoe_dev); + po->pppoe_dev = NULL; } + memset(sk_pppox(po) + 1, 0, sizeof(struct pppox_sock) - sizeof(struct sock)); sk->sk_state = PPPOX_NONE; @@ -666,16 +663,15 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, /* Re-bind in session stage only */ if (stage_session(sp->sa_addr.pppoe.sid)) { error = -ENODEV; - dev = 
dev_get_by_name(sock_net(sk), sp->sa_addr.pppoe.dev); + net = sock_net(sk); + dev = dev_get_by_name(net, sp->sa_addr.pppoe.dev); if (!dev) - goto end; + goto err_put; po->pppoe_dev = dev; po->pppoe_ifindex = dev->ifindex; - pn = pppoe_pernet(dev_net(dev)); - write_lock_bh(&pn->hash_lock); + pn = pppoe_pernet(net); if (!(dev->flags & IFF_UP)) { - write_unlock_bh(&pn->hash_lock); goto err_put; } @@ -683,6 +679,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, &sp->sa_addr.pppoe, sizeof(struct pppoe_addr)); + write_lock_bh(&pn->hash_lock); error = __set_item(pn, po); write_unlock_bh(&pn->hash_lock); if (error < 0) @@ -696,8 +693,11 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, po->chan.ops = &pppoe_chan_ops; error = ppp_register_net_channel(dev_net(dev), &po->chan); - if (error) + if (error) { + delete_item(pn, po->pppoe_pa.sid, + po->pppoe_pa.remote, po->pppoe_ifindex); goto err_put; + } sk->sk_state = PPPOX_CONNECTED; } @@ -915,6 +915,14 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb) struct pppoe_hdr *ph; int data_len = skb->len; + /* The higher-level PPP code (ppp_unregister_channel()) ensures the PPP + * xmit operations conclude prior to an unregistration call. Thus + * sk->sk_state cannot change, so we don't need to do lock_sock(). + * But, we also can't do a lock_sock since that introduces a potential + * deadlock as we'd reverse the lock ordering used when calling + * ppp_unregister_channel(). + */ + if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) goto abort; @@ -944,7 +952,6 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb) po->pppoe_pa.remote, NULL, data_len); dev_queue_xmit(skb); - return 1; abort: diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h index 3ec6e85587a..c2383adcd52 100644 --- a/drivers/net/qlge/qlge.h +++ b/drivers/net/qlge/qlge.h @@ -95,6 +95,7 @@ enum { /* Misc. stuff */ MAILBOX_COUNT = 16, + MAILBOX_TIMEOUT = 5, PROC_ADDR_RDY = (1 << 31), PROC_ADDR_R = (1 << 30), @@ -803,6 +804,12 @@ enum { MB_CMD_SET_PORT_CFG = 0x00000122, MB_CMD_GET_PORT_CFG = 0x00000123, MB_CMD_GET_LINK_STS = 0x00000124, + MB_CMD_SET_MGMNT_TFK_CTL = 0x00000160, /* Set Mgmnt Traffic Control */ + MB_SET_MPI_TFK_STOP = (1 << 0), + MB_SET_MPI_TFK_RESUME = (1 << 1), + MB_CMD_GET_MGMNT_TFK_CTL = 0x00000161, /* Get Mgmnt Traffic Control */ + MB_GET_MPI_TFK_STOPPED = (1 << 0), + MB_GET_MPI_TFK_FIFO_EMPTY = (1 << 1), /* Mailbox Command Status. */ MB_CMD_STS_GOOD = 0x00004000, /* Success. 
*/ @@ -1168,7 +1175,7 @@ struct ricb { #define RSS_RI6 0x40 #define RSS_RT6 0x80 __le16 mask; - __le32 hash_cq_id[256]; + u8 hash_cq_id[1024]; __le32 ipv6_hash_key[10]; __le32 ipv4_hash_key[4]; } __attribute((packed)); @@ -1606,6 +1613,8 @@ int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data); int ql_mb_about_fw(struct ql_adapter *qdev); void ql_link_on(struct ql_adapter *qdev); void ql_link_off(struct ql_adapter *qdev); +int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control); +int ql_wait_fifo_empty(struct ql_adapter *qdev); #if 1 #define QL_ALL_DUMP diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c index 61680715cde..cea7531f4f4 100644 --- a/drivers/net/qlge/qlge_main.c +++ b/drivers/net/qlge/qlge_main.c @@ -320,6 +320,37 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type, switch (type) { case MAC_ADDR_TYPE_MULTI_MAC: + { + u32 upper = (addr[0] << 8) | addr[1]; + u32 lower = (addr[2] << 24) | (addr[3] << 16) | + (addr[4] << 8) | (addr[5]); + + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + ql_write32(qdev, MAC_ADDR_IDX, (offset++) | + (index << MAC_ADDR_IDX_SHIFT) | + type | MAC_ADDR_E); + ql_write32(qdev, MAC_ADDR_DATA, lower); + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + ql_write32(qdev, MAC_ADDR_IDX, (offset++) | + (index << MAC_ADDR_IDX_SHIFT) | + type | MAC_ADDR_E); + + ql_write32(qdev, MAC_ADDR_DATA, upper); + status = + ql_wait_reg_rdy(qdev, + MAC_ADDR_IDX, MAC_ADDR_MW, 0); + if (status) + goto exit; + break; + } case MAC_ADDR_TYPE_CAM_MAC: { u32 cam_output; @@ -365,16 +396,14 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type, and possibly the function id. Right now we hardcode the route field to NIC core. */ - if (type == MAC_ADDR_TYPE_CAM_MAC) { - cam_output = (CAM_OUT_ROUTE_NIC | - (qdev-> - func << CAM_OUT_FUNC_SHIFT) | - (0 << CAM_OUT_CQ_ID_SHIFT)); - if (qdev->vlgrp) - cam_output |= CAM_OUT_RV; - /* route to NIC core */ - ql_write32(qdev, MAC_ADDR_DATA, cam_output); - } + cam_output = (CAM_OUT_ROUTE_NIC | + (qdev-> + func << CAM_OUT_FUNC_SHIFT) | + (0 << CAM_OUT_CQ_ID_SHIFT)); + if (qdev->vlgrp) + cam_output |= CAM_OUT_RV; + /* route to NIC core */ + ql_write32(qdev, MAC_ADDR_DATA, cam_output); break; } case MAC_ADDR_TYPE_VLAN: @@ -546,14 +575,14 @@ static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask, } case RT_IDX_MCAST: /* Pass up All Multicast frames. */ { - value = RT_IDX_DST_CAM_Q | /* dest */ + value = RT_IDX_DST_DFLT_Q | /* dest */ RT_IDX_TYPE_NICQ | /* type */ (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */ break; } case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. 
*/ { - value = RT_IDX_DST_CAM_Q | /* dest */ + value = RT_IDX_DST_DFLT_Q | /* dest */ RT_IDX_TYPE_NICQ | /* type */ (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */ break; @@ -3077,6 +3106,12 @@ err_irq: static int ql_start_rss(struct ql_adapter *qdev) { + u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, + 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, + 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, + 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, + 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, + 0xbe, 0xac, 0x01, 0xfa}; struct ricb *ricb = &qdev->ricb; int status = 0; int i; @@ -3086,21 +3121,17 @@ static int ql_start_rss(struct ql_adapter *qdev) ricb->base_cq = RSS_L4K; ricb->flags = - (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 | - RSS_RT6); - ricb->mask = cpu_to_le16(qdev->rss_ring_count - 1); + (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6); + ricb->mask = cpu_to_le16((u16)(0x3ff)); /* * Fill out the Indirection Table. */ - for (i = 0; i < 256; i++) - hash_id[i] = i & (qdev->rss_ring_count - 1); + for (i = 0; i < 1024; i++) + hash_id[i] = (i & (qdev->rss_ring_count - 1)); - /* - * Random values for the IPv6 and IPv4 Hash Keys. - */ - get_random_bytes((void *)&ricb->ipv6_hash_key[0], 40); - get_random_bytes((void *)&ricb->ipv4_hash_key[0], 16); + memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40); + memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16); QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n"); @@ -3239,6 +3270,13 @@ static int ql_adapter_initialize(struct ql_adapter *qdev) ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP | min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE)); + /* Set RX packet routing to use port/pci function on which the + * packet arrived on in addition to usual frame routing. + * This is helpful on bonding where both interfaces can have + * the same MAC address. + */ + ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ); + /* Start up the rx queues. */ for (i = 0; i < qdev->rx_ring_count; i++) { status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]); @@ -3311,6 +3349,13 @@ static int ql_adapter_reset(struct ql_adapter *qdev) end_jiffies = jiffies + max((unsigned long)1, usecs_to_jiffies(30)); + + /* Stop management traffic. */ + ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP); + + /* Wait for the NIC and MGMNT FIFOs to empty. */ + ql_wait_fifo_empty(qdev); + ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR); do { @@ -3326,6 +3371,8 @@ static int ql_adapter_reset(struct ql_adapter *qdev) status = -ETIMEDOUT; } + /* Resume management traffic. */ + ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME); return status; } @@ -3704,6 +3751,12 @@ static void ql_asic_reset_work(struct work_struct *work) status = ql_adapter_up(qdev); if (status) goto error; + + /* Restore rx mode. */ + clear_bit(QL_ALLMULTI, &qdev->flags); + clear_bit(QL_PROMISCUOUS, &qdev->flags); + qlge_set_multicast_list(qdev->ndev); + rtnl_unlock(); return; error: @@ -3863,6 +3916,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev, goto err_out; } + pci_save_state(pdev); qdev->reg_base = ioremap_nocache(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1)); @@ -4017,6 +4071,33 @@ static void __devexit qlge_remove(struct pci_dev *pdev) free_netdev(ndev); } +/* Clean up resources without touching hardware. 
*/ +static void ql_eeh_close(struct net_device *ndev) +{ + int i; + struct ql_adapter *qdev = netdev_priv(ndev); + + if (netif_carrier_ok(ndev)) { + netif_carrier_off(ndev); + netif_stop_queue(ndev); + } + + if (test_bit(QL_ADAPTER_UP, &qdev->flags)) + cancel_delayed_work_sync(&qdev->asic_reset_work); + cancel_delayed_work_sync(&qdev->mpi_reset_work); + cancel_delayed_work_sync(&qdev->mpi_work); + cancel_delayed_work_sync(&qdev->mpi_idc_work); + cancel_delayed_work_sync(&qdev->mpi_port_cfg_work); + + for (i = 0; i < qdev->rss_ring_count; i++) + netif_napi_del(&qdev->rx_ring[i].napi); + + clear_bit(QL_ADAPTER_UP, &qdev->flags); + ql_tx_ring_clean(qdev); + ql_free_rx_buffers(qdev); + ql_release_adapter_resources(qdev); +} + /* * This callback is called by the PCI subsystem whenever * a PCI bus error is detected. @@ -4025,17 +4106,21 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev, enum pci_channel_state state) { struct net_device *ndev = pci_get_drvdata(pdev); - struct ql_adapter *qdev = netdev_priv(ndev); - netif_device_detach(ndev); - - if (state == pci_channel_io_perm_failure) + switch (state) { + case pci_channel_io_normal: + return PCI_ERS_RESULT_CAN_RECOVER; + case pci_channel_io_frozen: + netif_device_detach(ndev); + if (netif_running(ndev)) + ql_eeh_close(ndev); + pci_disable_device(pdev); + return PCI_ERS_RESULT_NEED_RESET; + case pci_channel_io_perm_failure: + dev_err(&pdev->dev, + "%s: pci_channel_io_perm_failure.\n", __func__); return PCI_ERS_RESULT_DISCONNECT; - - if (netif_running(ndev)) - ql_adapter_down(qdev); - - pci_disable_device(pdev); + } /* Request a slot reset. */ return PCI_ERS_RESULT_NEED_RESET; @@ -4052,25 +4137,15 @@ static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev) struct net_device *ndev = pci_get_drvdata(pdev); struct ql_adapter *qdev = netdev_priv(ndev); + pdev->error_state = pci_channel_io_normal; + + pci_restore_state(pdev); if (pci_enable_device(pdev)) { QPRINTK(qdev, IFUP, ERR, "Cannot re-enable PCI device after reset.\n"); return PCI_ERS_RESULT_DISCONNECT; } - pci_set_master(pdev); - - netif_carrier_off(ndev); - ql_adapter_reset(qdev); - - /* Make sure the EEPROM is good */ - memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len); - - if (!is_valid_ether_addr(ndev->perm_addr)) { - QPRINTK(qdev, IFUP, ERR, "After reset, invalid MAC address.\n"); - return PCI_ERS_RESULT_DISCONNECT; - } - return PCI_ERS_RESULT_RECOVERED; } @@ -4078,17 +4153,21 @@ static void qlge_io_resume(struct pci_dev *pdev) { struct net_device *ndev = pci_get_drvdata(pdev); struct ql_adapter *qdev = netdev_priv(ndev); + int err = 0; - pci_set_master(pdev); - + if (ql_adapter_reset(qdev)) + QPRINTK(qdev, DRV, ERR, "reset FAILED!\n"); if (netif_running(ndev)) { - if (ql_adapter_up(qdev)) { + err = qlge_open(ndev); + if (err) { QPRINTK(qdev, IFUP, ERR, "Device initialization failed after reset.\n"); return; } + } else { + QPRINTK(qdev, IFUP, ERR, + "Device was not running prior to EEH.\n"); } - netif_device_attach(ndev); } diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c index c2e43073047..bcf13c96f73 100644 --- a/drivers/net/qlge/qlge_mpi.c +++ b/drivers/net/qlge/qlge_mpi.c @@ -470,7 +470,8 @@ end: */ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp) { - int status, count; + int status; + unsigned long count; /* Begin polled mode for MPI */ @@ -491,9 +492,9 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp) /* Wait for the command to complete. 
We loop * here because some AEN might arrive while * we're waiting for the mailbox command to - * complete. If more than 5 arrive then we can + * complete. If more than 5 seconds expire we can * assume something is wrong. */ - count = 5; + count = jiffies + HZ * MAILBOX_TIMEOUT; do { /* Wait for the interrupt to come in. */ status = ql_wait_mbx_cmd_cmplt(qdev); @@ -517,15 +518,15 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp) MB_CMD_STS_GOOD) || ((mbcp->mbox_out[0] & 0x0000f000) == MB_CMD_STS_INTRMDT)) - break; - } while (--count); + goto done; + } while (time_before(jiffies, count)); - if (!count) { - QPRINTK(qdev, DRV, ERR, - "Timed out waiting for mailbox complete.\n"); - status = -ETIMEDOUT; - goto end; - } + QPRINTK(qdev, DRV, ERR, + "Timed out waiting for mailbox complete.\n"); + status = -ETIMEDOUT; + goto end; + +done: /* Now we can clear the interrupt condition * and look at our status. @@ -768,6 +769,95 @@ static int ql_idc_wait(struct ql_adapter *qdev) return status; } +int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control) +{ + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int status; + + memset(mbcp, 0, sizeof(struct mbox_params)); + + mbcp->in_count = 1; + mbcp->out_count = 2; + + mbcp->mbox_in[0] = MB_CMD_SET_MGMNT_TFK_CTL; + mbcp->mbox_in[1] = control; + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD) + return status; + + if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) { + QPRINTK(qdev, DRV, ERR, + "Command not supported by firmware.\n"); + status = -EINVAL; + } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) { + /* This indicates that the firmware is + * already in the state we are trying to + * change it to. + */ + QPRINTK(qdev, DRV, ERR, + "Command parameters make no change.\n"); + } + return status; +} + +/* Returns a negative error code or the mailbox command status. */ +static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control) +{ + struct mbox_params mbc; + struct mbox_params *mbcp = &mbc; + int status; + + memset(mbcp, 0, sizeof(struct mbox_params)); + *control = 0; + + mbcp->in_count = 1; + mbcp->out_count = 1; + + mbcp->mbox_in[0] = MB_CMD_GET_MGMNT_TFK_CTL; + + status = ql_mailbox_command(qdev, mbcp); + if (status) + return status; + + if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD) { + *control = mbcp->mbox_in[1]; + return status; + } + + if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) { + QPRINTK(qdev, DRV, ERR, + "Command not supported by firmware.\n"); + status = -EINVAL; + } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) { + QPRINTK(qdev, DRV, ERR, + "Failed to get MPI traffic control.\n"); + status = -EIO; + } + return status; +} + +int ql_wait_fifo_empty(struct ql_adapter *qdev) +{ + int count = 5; + u32 mgmnt_fifo_empty; + u32 nic_fifo_empty; + + do { + nic_fifo_empty = ql_read32(qdev, STS) & STS_NFE; + ql_mb_get_mgmnt_traffic_ctl(qdev, &mgmnt_fifo_empty); + mgmnt_fifo_empty &= MB_GET_MPI_TFK_FIFO_EMPTY; + if (nic_fifo_empty && mgmnt_fifo_empty) + return 0; + msleep(100); + } while (count-- > 0); + return -ETIMEDOUT; +} + /* API called in work thread context to set new TX/RX * maximum frame size values to match MTU. 
*/ @@ -876,6 +966,8 @@ void ql_mpi_work(struct work_struct *work) int err = 0; rtnl_lock(); + /* Begin polled mode for MPI */ + ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); while (ql_read32(qdev, STS) & STS_PI) { memset(mbcp, 0, sizeof(struct mbox_params)); @@ -888,6 +980,8 @@ void ql_mpi_work(struct work_struct *work) break; } + /* End polled mode for MPI */ + ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); rtnl_unlock(); ql_enable_completion_interrupt(qdev, 0); } diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 50c6a3cfe43..fa493567848 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -115,7 +115,9 @@ enum mac_version { RTL_GIGA_MAC_VER_22 = 0x16, // 8168C RTL_GIGA_MAC_VER_23 = 0x17, // 8168CP RTL_GIGA_MAC_VER_24 = 0x18, // 8168CP - RTL_GIGA_MAC_VER_25 = 0x19 // 8168D + RTL_GIGA_MAC_VER_25 = 0x19, // 8168D + RTL_GIGA_MAC_VER_26 = 0x1a, // 8168D + RTL_GIGA_MAC_VER_27 = 0x1b // 8168DP }; #define _R(NAME,MAC,MASK) \ @@ -150,7 +152,9 @@ static const struct { _R("RTL8168c/8111c", RTL_GIGA_MAC_VER_22, 0xff7e1880), // PCI-E _R("RTL8168cp/8111cp", RTL_GIGA_MAC_VER_23, 0xff7e1880), // PCI-E _R("RTL8168cp/8111cp", RTL_GIGA_MAC_VER_24, 0xff7e1880), // PCI-E - _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_25, 0xff7e1880) // PCI-E + _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_25, 0xff7e1880), // PCI-E + _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_26, 0xff7e1880), // PCI-E + _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_27, 0xff7e1880) // PCI-E }; #undef _R @@ -253,6 +257,13 @@ enum rtl8168_8101_registers { DBG_REG = 0xd1, #define FIX_NAK_1 (1 << 4) #define FIX_NAK_2 (1 << 3) + EFUSEAR = 0xdc, +#define EFUSEAR_FLAG 0x80000000 +#define EFUSEAR_WRITE_CMD 0x80000000 +#define EFUSEAR_READ_CMD 0x00000000 +#define EFUSEAR_REG_MASK 0x03ff +#define EFUSEAR_REG_SHIFT 8 +#define EFUSEAR_DATA_MASK 0xff }; enum rtl_register_content { @@ -568,6 +579,14 @@ static void mdio_patch(void __iomem *ioaddr, int reg_addr, int value) mdio_write(ioaddr, reg_addr, mdio_read(ioaddr, reg_addr) | value); } +static void mdio_plus_minus(void __iomem *ioaddr, int reg_addr, int p, int m) +{ + int val; + + val = mdio_read(ioaddr, reg_addr); + mdio_write(ioaddr, reg_addr, (val | p) & ~m); +} + static void rtl_mdio_write(struct net_device *dev, int phy_id, int location, int val) { @@ -651,6 +670,24 @@ static u32 rtl_csi_read(void __iomem *ioaddr, int addr) return value; } +static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr) +{ + u8 value = 0xff; + unsigned int i; + + RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT); + + for (i = 0; i < 300; i++) { + if (RTL_R32(EFUSEAR) & EFUSEAR_FLAG) { + value = RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK; + break; + } + udelay(100); + } + + return value; +} + static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr) { RTL_W16(IntrMask, 0x0000); @@ -992,7 +1029,10 @@ static void rtl8169_vlan_rx_register(struct net_device *dev, spin_lock_irqsave(&tp->lock, flags); tp->vlgrp = grp; - if (tp->vlgrp) + /* + * Do not disable RxVlan on 8110SCd. + */ + if (tp->vlgrp || (tp->mac_version == RTL_GIGA_MAC_VER_05)) tp->cp_cmd |= RxVlan; else tp->cp_cmd &= ~RxVlan; @@ -1243,7 +1283,10 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp, int mac_version; } mac_info[] = { /* 8168D family. 
*/ - { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_25 }, + { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 }, + { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 }, + { 0x7c800000, 0x28800000, RTL_GIGA_MAC_VER_27 }, + { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 }, /* 8168C family. */ { 0x7cf00000, 0x3ca00000, RTL_GIGA_MAC_VER_24 }, @@ -1648,74 +1691,903 @@ static void rtl8168c_4_hw_phy_config(void __iomem *ioaddr) rtl8168c_3_hw_phy_config(ioaddr); } -static void rtl8168d_hw_phy_config(void __iomem *ioaddr) +static void rtl8168d_1_hw_phy_config(void __iomem *ioaddr) { - struct phy_reg phy_reg_init_0[] = { + static struct phy_reg phy_reg_init_0[] = { { 0x1f, 0x0001 }, - { 0x09, 0x2770 }, - { 0x08, 0x04d0 }, - { 0x0b, 0xad15 }, - { 0x0c, 0x5bf0 }, - { 0x1c, 0xf101 }, + { 0x06, 0x4064 }, + { 0x07, 0x2863 }, + { 0x08, 0x059c }, + { 0x09, 0x26b4 }, + { 0x0a, 0x6a19 }, + { 0x0b, 0xdcc8 }, + { 0x10, 0xf06d }, + { 0x14, 0x7f68 }, + { 0x18, 0x7fd9 }, + { 0x1c, 0xf0ff }, + { 0x1d, 0x3d9c }, { 0x1f, 0x0003 }, - { 0x14, 0x94d7 }, - { 0x12, 0xf4d6 }, - { 0x09, 0xca0f }, - { 0x1f, 0x0002 }, - { 0x0b, 0x0b10 }, - { 0x0c, 0xd1f7 }, - { 0x1f, 0x0002 }, - { 0x06, 0x5461 }, + { 0x12, 0xf49f }, + { 0x13, 0x070b }, + { 0x1a, 0x05ad }, + { 0x14, 0x94c0 } + }; + static struct phy_reg phy_reg_init_1[] = { { 0x1f, 0x0002 }, - { 0x05, 0x6662 }, + { 0x06, 0x5561 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8332 }, + { 0x06, 0x5561 } + }; + static struct phy_reg phy_reg_init_2[] = { + { 0x1f, 0x0005 }, + { 0x05, 0xffc2 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8000 }, + { 0x06, 0xf8f9 }, + { 0x06, 0xfaef }, + { 0x06, 0x59ee }, + { 0x06, 0xf8ea }, + { 0x06, 0x00ee }, + { 0x06, 0xf8eb }, + { 0x06, 0x00e0 }, + { 0x06, 0xf87c }, + { 0x06, 0xe1f8 }, + { 0x06, 0x7d59 }, + { 0x06, 0x0fef }, + { 0x06, 0x0139 }, + { 0x06, 0x029e }, + { 0x06, 0x06ef }, + { 0x06, 0x1039 }, + { 0x06, 0x089f }, + { 0x06, 0x2aee }, + { 0x06, 0xf8ea }, + { 0x06, 0x00ee }, + { 0x06, 0xf8eb }, + { 0x06, 0x01e0 }, + { 0x06, 0xf87c }, + { 0x06, 0xe1f8 }, + { 0x06, 0x7d58 }, + { 0x06, 0x409e }, + { 0x06, 0x0f39 }, + { 0x06, 0x46aa }, + { 0x06, 0x0bbf }, + { 0x06, 0x8290 }, + { 0x06, 0xd682 }, + { 0x06, 0x9802 }, + { 0x06, 0x014f }, + { 0x06, 0xae09 }, + { 0x06, 0xbf82 }, + { 0x06, 0x98d6 }, + { 0x06, 0x82a0 }, + { 0x06, 0x0201 }, + { 0x06, 0x4fef }, + { 0x06, 0x95fe }, + { 0x06, 0xfdfc }, + { 0x06, 0x05f8 }, + { 0x06, 0xf9fa }, + { 0x06, 0xeef8 }, + { 0x06, 0xea00 }, + { 0x06, 0xeef8 }, + { 0x06, 0xeb00 }, + { 0x06, 0xe2f8 }, + { 0x06, 0x7ce3 }, + { 0x06, 0xf87d }, + { 0x06, 0xa511 }, + { 0x06, 0x1112 }, + { 0x06, 0xd240 }, + { 0x06, 0xd644 }, + { 0x06, 0x4402 }, + { 0x06, 0x8217 }, + { 0x06, 0xd2a0 }, + { 0x06, 0xd6aa }, + { 0x06, 0xaa02 }, + { 0x06, 0x8217 }, + { 0x06, 0xae0f }, + { 0x06, 0xa544 }, + { 0x06, 0x4402 }, + { 0x06, 0xae4d }, + { 0x06, 0xa5aa }, + { 0x06, 0xaa02 }, + { 0x06, 0xae47 }, + { 0x06, 0xaf82 }, + { 0x06, 0x13ee }, + { 0x06, 0x834e }, + { 0x06, 0x00ee }, + { 0x06, 0x834d }, + { 0x06, 0x0fee }, + { 0x06, 0x834c }, + { 0x06, 0x0fee }, + { 0x06, 0x834f }, + { 0x06, 0x00ee }, + { 0x06, 0x8351 }, + { 0x06, 0x00ee }, + { 0x06, 0x834a }, + { 0x06, 0xffee }, + { 0x06, 0x834b }, + { 0x06, 0xffe0 }, + { 0x06, 0x8330 }, + { 0x06, 0xe183 }, + { 0x06, 0x3158 }, + { 0x06, 0xfee4 }, + { 0x06, 0xf88a }, + { 0x06, 0xe5f8 }, + { 0x06, 0x8be0 }, + { 0x06, 0x8332 }, + { 0x06, 0xe183 }, + { 0x06, 0x3359 }, + { 0x06, 0x0fe2 }, + { 0x06, 0x834d }, + { 0x06, 0x0c24 }, + { 0x06, 0x5af0 }, + { 0x06, 0x1e12 }, + { 0x06, 0xe4f8 }, + { 0x06, 0x8ce5 }, + { 0x06, 0xf88d }, + { 0x06, 
0xaf82 }, + { 0x06, 0x13e0 }, + { 0x06, 0x834f }, + { 0x06, 0x10e4 }, + { 0x06, 0x834f }, + { 0x06, 0xe083 }, + { 0x06, 0x4e78 }, + { 0x06, 0x009f }, + { 0x06, 0x0ae0 }, + { 0x06, 0x834f }, + { 0x06, 0xa010 }, + { 0x06, 0xa5ee }, + { 0x06, 0x834e }, + { 0x06, 0x01e0 }, + { 0x06, 0x834e }, + { 0x06, 0x7805 }, + { 0x06, 0x9e9a }, + { 0x06, 0xe083 }, + { 0x06, 0x4e78 }, + { 0x06, 0x049e }, + { 0x06, 0x10e0 }, + { 0x06, 0x834e }, + { 0x06, 0x7803 }, + { 0x06, 0x9e0f }, + { 0x06, 0xe083 }, + { 0x06, 0x4e78 }, + { 0x06, 0x019e }, + { 0x06, 0x05ae }, + { 0x06, 0x0caf }, + { 0x06, 0x81f8 }, + { 0x06, 0xaf81 }, + { 0x06, 0xa3af }, + { 0x06, 0x81dc }, + { 0x06, 0xaf82 }, + { 0x06, 0x13ee }, + { 0x06, 0x8348 }, + { 0x06, 0x00ee }, + { 0x06, 0x8349 }, + { 0x06, 0x00e0 }, + { 0x06, 0x8351 }, + { 0x06, 0x10e4 }, + { 0x06, 0x8351 }, + { 0x06, 0x5801 }, + { 0x06, 0x9fea }, + { 0x06, 0xd000 }, + { 0x06, 0xd180 }, + { 0x06, 0x1f66 }, + { 0x06, 0xe2f8 }, + { 0x06, 0xeae3 }, + { 0x06, 0xf8eb }, + { 0x06, 0x5af8 }, + { 0x06, 0x1e20 }, + { 0x06, 0xe6f8 }, + { 0x06, 0xeae5 }, + { 0x06, 0xf8eb }, + { 0x06, 0xd302 }, + { 0x06, 0xb3fe }, + { 0x06, 0xe2f8 }, + { 0x06, 0x7cef }, + { 0x06, 0x325b }, + { 0x06, 0x80e3 }, + { 0x06, 0xf87d }, + { 0x06, 0x9e03 }, + { 0x06, 0x7dff }, + { 0x06, 0xff0d }, + { 0x06, 0x581c }, + { 0x06, 0x551a }, + { 0x06, 0x6511 }, + { 0x06, 0xa190 }, + { 0x06, 0xd3e2 }, + { 0x06, 0x8348 }, + { 0x06, 0xe383 }, + { 0x06, 0x491b }, + { 0x06, 0x56ab }, + { 0x06, 0x08ef }, + { 0x06, 0x56e6 }, + { 0x06, 0x8348 }, + { 0x06, 0xe783 }, + { 0x06, 0x4910 }, + { 0x06, 0xd180 }, + { 0x06, 0x1f66 }, + { 0x06, 0xa004 }, + { 0x06, 0xb9e2 }, + { 0x06, 0x8348 }, + { 0x06, 0xe383 }, + { 0x06, 0x49ef }, + { 0x06, 0x65e2 }, + { 0x06, 0x834a }, + { 0x06, 0xe383 }, + { 0x06, 0x4b1b }, + { 0x06, 0x56aa }, + { 0x06, 0x0eef }, + { 0x06, 0x56e6 }, + { 0x06, 0x834a }, + { 0x06, 0xe783 }, + { 0x06, 0x4be2 }, + { 0x06, 0x834d }, + { 0x06, 0xe683 }, + { 0x06, 0x4ce0 }, + { 0x06, 0x834d }, + { 0x06, 0xa000 }, + { 0x06, 0x0caf }, + { 0x06, 0x81dc }, + { 0x06, 0xe083 }, + { 0x06, 0x4d10 }, + { 0x06, 0xe483 }, + { 0x06, 0x4dae }, + { 0x06, 0x0480 }, + { 0x06, 0xe483 }, + { 0x06, 0x4de0 }, + { 0x06, 0x834e }, + { 0x06, 0x7803 }, + { 0x06, 0x9e0b }, + { 0x06, 0xe083 }, + { 0x06, 0x4e78 }, + { 0x06, 0x049e }, + { 0x06, 0x04ee }, + { 0x06, 0x834e }, + { 0x06, 0x02e0 }, + { 0x06, 0x8332 }, + { 0x06, 0xe183 }, + { 0x06, 0x3359 }, + { 0x06, 0x0fe2 }, + { 0x06, 0x834d }, + { 0x06, 0x0c24 }, + { 0x06, 0x5af0 }, + { 0x06, 0x1e12 }, + { 0x06, 0xe4f8 }, + { 0x06, 0x8ce5 }, + { 0x06, 0xf88d }, + { 0x06, 0xe083 }, + { 0x06, 0x30e1 }, + { 0x06, 0x8331 }, + { 0x06, 0x6801 }, + { 0x06, 0xe4f8 }, + { 0x06, 0x8ae5 }, + { 0x06, 0xf88b }, + { 0x06, 0xae37 }, + { 0x06, 0xee83 }, + { 0x06, 0x4e03 }, + { 0x06, 0xe083 }, + { 0x06, 0x4ce1 }, + { 0x06, 0x834d }, + { 0x06, 0x1b01 }, + { 0x06, 0x9e04 }, + { 0x06, 0xaaa1 }, + { 0x06, 0xaea8 }, + { 0x06, 0xee83 }, + { 0x06, 0x4e04 }, + { 0x06, 0xee83 }, + { 0x06, 0x4f00 }, + { 0x06, 0xaeab }, + { 0x06, 0xe083 }, + { 0x06, 0x4f78 }, + { 0x06, 0x039f }, + { 0x06, 0x14ee }, + { 0x06, 0x834e }, + { 0x06, 0x05d2 }, + { 0x06, 0x40d6 }, + { 0x06, 0x5554 }, + { 0x06, 0x0282 }, + { 0x06, 0x17d2 }, + { 0x06, 0xa0d6 }, + { 0x06, 0xba00 }, + { 0x06, 0x0282 }, + { 0x06, 0x17fe }, + { 0x06, 0xfdfc }, + { 0x06, 0x05f8 }, + { 0x06, 0xe0f8 }, + { 0x06, 0x60e1 }, + { 0x06, 0xf861 }, + { 0x06, 0x6802 }, + { 0x06, 0xe4f8 }, + { 0x06, 0x60e5 }, + { 0x06, 0xf861 }, + { 0x06, 0xe0f8 }, + { 0x06, 0x48e1 }, + { 0x06, 0xf849 }, + { 
0x06, 0x580f }, + { 0x06, 0x1e02 }, + { 0x06, 0xe4f8 }, + { 0x06, 0x48e5 }, + { 0x06, 0xf849 }, + { 0x06, 0xd000 }, + { 0x06, 0x0282 }, + { 0x06, 0x5bbf }, + { 0x06, 0x8350 }, + { 0x06, 0xef46 }, + { 0x06, 0xdc19 }, + { 0x06, 0xddd0 }, + { 0x06, 0x0102 }, + { 0x06, 0x825b }, + { 0x06, 0x0282 }, + { 0x06, 0x77e0 }, + { 0x06, 0xf860 }, + { 0x06, 0xe1f8 }, + { 0x06, 0x6158 }, + { 0x06, 0xfde4 }, + { 0x06, 0xf860 }, + { 0x06, 0xe5f8 }, + { 0x06, 0x61fc }, + { 0x06, 0x04f9 }, + { 0x06, 0xfafb }, + { 0x06, 0xc6bf }, + { 0x06, 0xf840 }, + { 0x06, 0xbe83 }, + { 0x06, 0x50a0 }, + { 0x06, 0x0101 }, + { 0x06, 0x071b }, + { 0x06, 0x89cf }, + { 0x06, 0xd208 }, + { 0x06, 0xebdb }, + { 0x06, 0x19b2 }, + { 0x06, 0xfbff }, + { 0x06, 0xfefd }, + { 0x06, 0x04f8 }, + { 0x06, 0xe0f8 }, + { 0x06, 0x48e1 }, + { 0x06, 0xf849 }, + { 0x06, 0x6808 }, + { 0x06, 0xe4f8 }, + { 0x06, 0x48e5 }, + { 0x06, 0xf849 }, + { 0x06, 0x58f7 }, + { 0x06, 0xe4f8 }, + { 0x06, 0x48e5 }, + { 0x06, 0xf849 }, + { 0x06, 0xfc04 }, + { 0x06, 0x4d20 }, + { 0x06, 0x0002 }, + { 0x06, 0x4e22 }, + { 0x06, 0x0002 }, + { 0x06, 0x4ddf }, + { 0x06, 0xff01 }, + { 0x06, 0x4edd }, + { 0x06, 0xff01 }, + { 0x05, 0x83d4 }, + { 0x06, 0x8000 }, + { 0x05, 0x83d8 }, + { 0x06, 0x8051 }, + { 0x02, 0x6010 }, + { 0x03, 0xdc00 }, + { 0x05, 0xfff6 }, + { 0x06, 0x00fc }, { 0x1f, 0x0000 }, - { 0x14, 0x0060 }, + { 0x1f, 0x0000 }, - { 0x0d, 0xf8a0 }, + { 0x0d, 0xf880 }, + { 0x1f, 0x0000 } + }; + + rtl_phy_write(ioaddr, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); + + mdio_write(ioaddr, 0x1f, 0x0002); + mdio_plus_minus(ioaddr, 0x0b, 0x0010, 0x00ef); + mdio_plus_minus(ioaddr, 0x0c, 0xa200, 0x5d00); + + rtl_phy_write(ioaddr, phy_reg_init_1, ARRAY_SIZE(phy_reg_init_1)); + + if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) { + struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x669a }, + { 0x1f, 0x0005 }, + { 0x05, 0x8330 }, + { 0x06, 0x669a }, + { 0x1f, 0x0002 } + }; + int val; + + rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + val = mdio_read(ioaddr, 0x0d); + + if ((val & 0x00ff) != 0x006c) { + u32 set[] = { + 0x0065, 0x0066, 0x0067, 0x0068, + 0x0069, 0x006a, 0x006b, 0x006c + }; + int i; + + mdio_write(ioaddr, 0x1f, 0x0002); + + val &= 0xff00; + for (i = 0; i < ARRAY_SIZE(set); i++) + mdio_write(ioaddr, 0x0d, val | set[i]); + } + } else { + struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x6662 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8330 }, + { 0x06, 0x6662 } + }; + + rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + } + + mdio_write(ioaddr, 0x1f, 0x0002); + mdio_patch(ioaddr, 0x0d, 0x0300); + mdio_patch(ioaddr, 0x0f, 0x0010); + + mdio_write(ioaddr, 0x1f, 0x0002); + mdio_plus_minus(ioaddr, 0x02, 0x0100, 0x0600); + mdio_plus_minus(ioaddr, 0x03, 0x0000, 0xe000); + + rtl_phy_write(ioaddr, phy_reg_init_2, ARRAY_SIZE(phy_reg_init_2)); +} + +static void rtl8168d_2_hw_phy_config(void __iomem *ioaddr) +{ + static struct phy_reg phy_reg_init_0[] = { + { 0x1f, 0x0001 }, + { 0x06, 0x4064 }, + { 0x07, 0x2863 }, + { 0x08, 0x059c }, + { 0x09, 0x26b4 }, + { 0x0a, 0x6a19 }, + { 0x0b, 0xdcc8 }, + { 0x10, 0xf06d }, + { 0x14, 0x7f68 }, + { 0x18, 0x7fd9 }, + { 0x1c, 0xf0ff }, + { 0x1d, 0x3d9c }, + { 0x1f, 0x0003 }, + { 0x12, 0xf49f }, + { 0x13, 0x070b }, + { 0x1a, 0x05ad }, + { 0x14, 0x94c0 }, + + { 0x1f, 0x0002 }, + { 0x06, 0x5561 }, { 0x1f, 0x0005 }, - { 0x05, 0xffc2 } + { 0x05, 0x8332 }, + { 0x06, 0x5561 } + }; + static struct phy_reg phy_reg_init_1[] = { + { 0x1f, 0x0005 }, + { 0x05, 0xffc2 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8000 
}, + { 0x06, 0xf8f9 }, + { 0x06, 0xfaee }, + { 0x06, 0xf8ea }, + { 0x06, 0x00ee }, + { 0x06, 0xf8eb }, + { 0x06, 0x00e2 }, + { 0x06, 0xf87c }, + { 0x06, 0xe3f8 }, + { 0x06, 0x7da5 }, + { 0x06, 0x1111 }, + { 0x06, 0x12d2 }, + { 0x06, 0x40d6 }, + { 0x06, 0x4444 }, + { 0x06, 0x0281 }, + { 0x06, 0xc6d2 }, + { 0x06, 0xa0d6 }, + { 0x06, 0xaaaa }, + { 0x06, 0x0281 }, + { 0x06, 0xc6ae }, + { 0x06, 0x0fa5 }, + { 0x06, 0x4444 }, + { 0x06, 0x02ae }, + { 0x06, 0x4da5 }, + { 0x06, 0xaaaa }, + { 0x06, 0x02ae }, + { 0x06, 0x47af }, + { 0x06, 0x81c2 }, + { 0x06, 0xee83 }, + { 0x06, 0x4e00 }, + { 0x06, 0xee83 }, + { 0x06, 0x4d0f }, + { 0x06, 0xee83 }, + { 0x06, 0x4c0f }, + { 0x06, 0xee83 }, + { 0x06, 0x4f00 }, + { 0x06, 0xee83 }, + { 0x06, 0x5100 }, + { 0x06, 0xee83 }, + { 0x06, 0x4aff }, + { 0x06, 0xee83 }, + { 0x06, 0x4bff }, + { 0x06, 0xe083 }, + { 0x06, 0x30e1 }, + { 0x06, 0x8331 }, + { 0x06, 0x58fe }, + { 0x06, 0xe4f8 }, + { 0x06, 0x8ae5 }, + { 0x06, 0xf88b }, + { 0x06, 0xe083 }, + { 0x06, 0x32e1 }, + { 0x06, 0x8333 }, + { 0x06, 0x590f }, + { 0x06, 0xe283 }, + { 0x06, 0x4d0c }, + { 0x06, 0x245a }, + { 0x06, 0xf01e }, + { 0x06, 0x12e4 }, + { 0x06, 0xf88c }, + { 0x06, 0xe5f8 }, + { 0x06, 0x8daf }, + { 0x06, 0x81c2 }, + { 0x06, 0xe083 }, + { 0x06, 0x4f10 }, + { 0x06, 0xe483 }, + { 0x06, 0x4fe0 }, + { 0x06, 0x834e }, + { 0x06, 0x7800 }, + { 0x06, 0x9f0a }, + { 0x06, 0xe083 }, + { 0x06, 0x4fa0 }, + { 0x06, 0x10a5 }, + { 0x06, 0xee83 }, + { 0x06, 0x4e01 }, + { 0x06, 0xe083 }, + { 0x06, 0x4e78 }, + { 0x06, 0x059e }, + { 0x06, 0x9ae0 }, + { 0x06, 0x834e }, + { 0x06, 0x7804 }, + { 0x06, 0x9e10 }, + { 0x06, 0xe083 }, + { 0x06, 0x4e78 }, + { 0x06, 0x039e }, + { 0x06, 0x0fe0 }, + { 0x06, 0x834e }, + { 0x06, 0x7801 }, + { 0x06, 0x9e05 }, + { 0x06, 0xae0c }, + { 0x06, 0xaf81 }, + { 0x06, 0xa7af }, + { 0x06, 0x8152 }, + { 0x06, 0xaf81 }, + { 0x06, 0x8baf }, + { 0x06, 0x81c2 }, + { 0x06, 0xee83 }, + { 0x06, 0x4800 }, + { 0x06, 0xee83 }, + { 0x06, 0x4900 }, + { 0x06, 0xe083 }, + { 0x06, 0x5110 }, + { 0x06, 0xe483 }, + { 0x06, 0x5158 }, + { 0x06, 0x019f }, + { 0x06, 0xead0 }, + { 0x06, 0x00d1 }, + { 0x06, 0x801f }, + { 0x06, 0x66e2 }, + { 0x06, 0xf8ea }, + { 0x06, 0xe3f8 }, + { 0x06, 0xeb5a }, + { 0x06, 0xf81e }, + { 0x06, 0x20e6 }, + { 0x06, 0xf8ea }, + { 0x06, 0xe5f8 }, + { 0x06, 0xebd3 }, + { 0x06, 0x02b3 }, + { 0x06, 0xfee2 }, + { 0x06, 0xf87c }, + { 0x06, 0xef32 }, + { 0x06, 0x5b80 }, + { 0x06, 0xe3f8 }, + { 0x06, 0x7d9e }, + { 0x06, 0x037d }, + { 0x06, 0xffff }, + { 0x06, 0x0d58 }, + { 0x06, 0x1c55 }, + { 0x06, 0x1a65 }, + { 0x06, 0x11a1 }, + { 0x06, 0x90d3 }, + { 0x06, 0xe283 }, + { 0x06, 0x48e3 }, + { 0x06, 0x8349 }, + { 0x06, 0x1b56 }, + { 0x06, 0xab08 }, + { 0x06, 0xef56 }, + { 0x06, 0xe683 }, + { 0x06, 0x48e7 }, + { 0x06, 0x8349 }, + { 0x06, 0x10d1 }, + { 0x06, 0x801f }, + { 0x06, 0x66a0 }, + { 0x06, 0x04b9 }, + { 0x06, 0xe283 }, + { 0x06, 0x48e3 }, + { 0x06, 0x8349 }, + { 0x06, 0xef65 }, + { 0x06, 0xe283 }, + { 0x06, 0x4ae3 }, + { 0x06, 0x834b }, + { 0x06, 0x1b56 }, + { 0x06, 0xaa0e }, + { 0x06, 0xef56 }, + { 0x06, 0xe683 }, + { 0x06, 0x4ae7 }, + { 0x06, 0x834b }, + { 0x06, 0xe283 }, + { 0x06, 0x4de6 }, + { 0x06, 0x834c }, + { 0x06, 0xe083 }, + { 0x06, 0x4da0 }, + { 0x06, 0x000c }, + { 0x06, 0xaf81 }, + { 0x06, 0x8be0 }, + { 0x06, 0x834d }, + { 0x06, 0x10e4 }, + { 0x06, 0x834d }, + { 0x06, 0xae04 }, + { 0x06, 0x80e4 }, + { 0x06, 0x834d }, + { 0x06, 0xe083 }, + { 0x06, 0x4e78 }, + { 0x06, 0x039e }, + { 0x06, 0x0be0 }, + { 0x06, 0x834e }, + { 0x06, 0x7804 }, + { 0x06, 0x9e04 }, + { 0x06, 0xee83 }, + { 0x06, 
0x4e02 }, + { 0x06, 0xe083 }, + { 0x06, 0x32e1 }, + { 0x06, 0x8333 }, + { 0x06, 0x590f }, + { 0x06, 0xe283 }, + { 0x06, 0x4d0c }, + { 0x06, 0x245a }, + { 0x06, 0xf01e }, + { 0x06, 0x12e4 }, + { 0x06, 0xf88c }, + { 0x06, 0xe5f8 }, + { 0x06, 0x8de0 }, + { 0x06, 0x8330 }, + { 0x06, 0xe183 }, + { 0x06, 0x3168 }, + { 0x06, 0x01e4 }, + { 0x06, 0xf88a }, + { 0x06, 0xe5f8 }, + { 0x06, 0x8bae }, + { 0x06, 0x37ee }, + { 0x06, 0x834e }, + { 0x06, 0x03e0 }, + { 0x06, 0x834c }, + { 0x06, 0xe183 }, + { 0x06, 0x4d1b }, + { 0x06, 0x019e }, + { 0x06, 0x04aa }, + { 0x06, 0xa1ae }, + { 0x06, 0xa8ee }, + { 0x06, 0x834e }, + { 0x06, 0x04ee }, + { 0x06, 0x834f }, + { 0x06, 0x00ae }, + { 0x06, 0xabe0 }, + { 0x06, 0x834f }, + { 0x06, 0x7803 }, + { 0x06, 0x9f14 }, + { 0x06, 0xee83 }, + { 0x06, 0x4e05 }, + { 0x06, 0xd240 }, + { 0x06, 0xd655 }, + { 0x06, 0x5402 }, + { 0x06, 0x81c6 }, + { 0x06, 0xd2a0 }, + { 0x06, 0xd6ba }, + { 0x06, 0x0002 }, + { 0x06, 0x81c6 }, + { 0x06, 0xfefd }, + { 0x06, 0xfc05 }, + { 0x06, 0xf8e0 }, + { 0x06, 0xf860 }, + { 0x06, 0xe1f8 }, + { 0x06, 0x6168 }, + { 0x06, 0x02e4 }, + { 0x06, 0xf860 }, + { 0x06, 0xe5f8 }, + { 0x06, 0x61e0 }, + { 0x06, 0xf848 }, + { 0x06, 0xe1f8 }, + { 0x06, 0x4958 }, + { 0x06, 0x0f1e }, + { 0x06, 0x02e4 }, + { 0x06, 0xf848 }, + { 0x06, 0xe5f8 }, + { 0x06, 0x49d0 }, + { 0x06, 0x0002 }, + { 0x06, 0x820a }, + { 0x06, 0xbf83 }, + { 0x06, 0x50ef }, + { 0x06, 0x46dc }, + { 0x06, 0x19dd }, + { 0x06, 0xd001 }, + { 0x06, 0x0282 }, + { 0x06, 0x0a02 }, + { 0x06, 0x8226 }, + { 0x06, 0xe0f8 }, + { 0x06, 0x60e1 }, + { 0x06, 0xf861 }, + { 0x06, 0x58fd }, + { 0x06, 0xe4f8 }, + { 0x06, 0x60e5 }, + { 0x06, 0xf861 }, + { 0x06, 0xfc04 }, + { 0x06, 0xf9fa }, + { 0x06, 0xfbc6 }, + { 0x06, 0xbff8 }, + { 0x06, 0x40be }, + { 0x06, 0x8350 }, + { 0x06, 0xa001 }, + { 0x06, 0x0107 }, + { 0x06, 0x1b89 }, + { 0x06, 0xcfd2 }, + { 0x06, 0x08eb }, + { 0x06, 0xdb19 }, + { 0x06, 0xb2fb }, + { 0x06, 0xfffe }, + { 0x06, 0xfd04 }, + { 0x06, 0xf8e0 }, + { 0x06, 0xf848 }, + { 0x06, 0xe1f8 }, + { 0x06, 0x4968 }, + { 0x06, 0x08e4 }, + { 0x06, 0xf848 }, + { 0x06, 0xe5f8 }, + { 0x06, 0x4958 }, + { 0x06, 0xf7e4 }, + { 0x06, 0xf848 }, + { 0x06, 0xe5f8 }, + { 0x06, 0x49fc }, + { 0x06, 0x044d }, + { 0x06, 0x2000 }, + { 0x06, 0x024e }, + { 0x06, 0x2200 }, + { 0x06, 0x024d }, + { 0x06, 0xdfff }, + { 0x06, 0x014e }, + { 0x06, 0xddff }, + { 0x06, 0x0100 }, + { 0x05, 0x83d8 }, + { 0x06, 0x8000 }, + { 0x03, 0xdc00 }, + { 0x05, 0xfff6 }, + { 0x06, 0x00fc }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 }, + { 0x1f, 0x0000 } }; rtl_phy_write(ioaddr, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); - if (mdio_read(ioaddr, 0x06) == 0xc400) { - struct phy_reg phy_reg_init_1[] = { + if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) { + struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x669a }, { 0x1f, 0x0005 }, - { 0x01, 0x0300 }, - { 0x1f, 0x0000 }, - { 0x11, 0x401c }, - { 0x16, 0x4100 }, + { 0x05, 0x8330 }, + { 0x06, 0x669a }, + + { 0x1f, 0x0002 } + }; + int val; + + rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + val = mdio_read(ioaddr, 0x0d); + if ((val & 0x00ff) != 0x006c) { + u32 set[] = { + 0x0065, 0x0066, 0x0067, 0x0068, + 0x0069, 0x006a, 0x006b, 0x006c + }; + int i; + + mdio_write(ioaddr, 0x1f, 0x0002); + + val &= 0xff00; + for (i = 0; i < ARRAY_SIZE(set); i++) + mdio_write(ioaddr, 0x0d, val | set[i]); + } + } else { + struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x2642 }, { 0x1f, 0x0005 }, - { 0x07, 0x0010 }, - { 0x05, 0x83dc }, - { 0x06, 0x087d }, - { 0x05, 
0x8300 }, - { 0x06, 0x0101 }, - { 0x06, 0x05f8 }, - { 0x06, 0xf9fa }, - { 0x06, 0xfbef }, - { 0x06, 0x79e2 }, - { 0x06, 0x835f }, - { 0x06, 0xe0f8 }, - { 0x06, 0x9ae1 }, - { 0x06, 0xf89b }, - { 0x06, 0xef31 }, - { 0x06, 0x3b65 }, - { 0x06, 0xaa07 }, - { 0x06, 0x81e4 }, - { 0x06, 0xf89a }, - { 0x06, 0xe5f8 }, - { 0x06, 0x9baf }, - { 0x06, 0x06ae }, - { 0x05, 0x83dc }, - { 0x06, 0x8300 }, + { 0x05, 0x8330 }, + { 0x06, 0x2642 } }; - rtl_phy_write(ioaddr, phy_reg_init_1, - ARRAY_SIZE(phy_reg_init_1)); + rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); } - mdio_write(ioaddr, 0x1f, 0x0000); + mdio_write(ioaddr, 0x1f, 0x0002); + mdio_plus_minus(ioaddr, 0x02, 0x0100, 0x0600); + mdio_plus_minus(ioaddr, 0x03, 0x0000, 0xe000); + + mdio_write(ioaddr, 0x1f, 0x0001); + mdio_write(ioaddr, 0x17, 0x0cc0); + + mdio_write(ioaddr, 0x1f, 0x0002); + mdio_patch(ioaddr, 0x0f, 0x0017); + + rtl_phy_write(ioaddr, phy_reg_init_1, ARRAY_SIZE(phy_reg_init_1)); +} + +static void rtl8168d_3_hw_phy_config(void __iomem *ioaddr) +{ + struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x10, 0x0008 }, + { 0x0d, 0x006c }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0001 }, + { 0x0b, 0xa4d8 }, + { 0x09, 0x281c }, + { 0x07, 0x2883 }, + { 0x0a, 0x6b35 }, + { 0x1d, 0x3da4 }, + { 0x1c, 0xeffd }, + { 0x14, 0x7f52 }, + { 0x18, 0x7fc6 }, + { 0x08, 0x0601 }, + { 0x06, 0x4063 }, + { 0x10, 0xf074 }, + { 0x1f, 0x0003 }, + { 0x13, 0x0789 }, + { 0x12, 0xf4bd }, + { 0x1a, 0x04fd }, + { 0x14, 0x84b0 }, + { 0x1f, 0x0000 }, + { 0x00, 0x9200 }, + + { 0x1f, 0x0005 }, + { 0x01, 0x0340 }, + { 0x1f, 0x0001 }, + { 0x04, 0x4000 }, + { 0x03, 0x1d21 }, + { 0x02, 0x0c32 }, + { 0x01, 0x0200 }, + { 0x00, 0x5554 }, + { 0x04, 0x4800 }, + { 0x04, 0x4000 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0xf000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0007 }, + { 0x1e, 0x0023 }, + { 0x16, 0x0000 }, + { 0x1f, 0x0000 } + }; + + rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); } static void rtl8102e_hw_phy_config(void __iomem *ioaddr) @@ -1792,7 +2664,13 @@ static void rtl_hw_phy_config(struct net_device *dev) rtl8168cp_2_hw_phy_config(ioaddr); break; case RTL_GIGA_MAC_VER_25: - rtl8168d_hw_phy_config(ioaddr); + rtl8168d_1_hw_phy_config(ioaddr); + break; + case RTL_GIGA_MAC_VER_26: + rtl8168d_2_hw_phy_config(ioaddr); + break; + case RTL_GIGA_MAC_VER_27: + rtl8168d_3_hw_phy_config(ioaddr); break; default: @@ -2322,6 +3200,14 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } rtl8169_init_phy(dev, tp); + + /* + * Pretend we are using VLANs; This bypasses a nasty bug where + * Interrupts stop flowing on high load on 8110SCd controllers. + */ + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan); + device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL); out: @@ -2493,7 +3379,7 @@ static u16 rtl_rw_cpluscmd(void __iomem *ioaddr) static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz) { /* Low hurts. Let's disable the filtering. 
*/ - RTL_W16(RxMaxSize, rx_buf_sz); + RTL_W16(RxMaxSize, rx_buf_sz + 1); } static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version) @@ -2863,6 +3749,8 @@ static void rtl_hw_start_8168(struct net_device *dev) break; case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: rtl_hw_start_8168d(ioaddr, pdev); break; diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c index 01f9432c31e..98bff5ada09 100644 --- a/drivers/net/sfc/rx.c +++ b/drivers/net/sfc/rx.c @@ -444,7 +444,8 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, * the appropriate LRO method */ static void efx_rx_packet_lro(struct efx_channel *channel, - struct efx_rx_buffer *rx_buf) + struct efx_rx_buffer *rx_buf, + bool checksummed) { struct napi_struct *napi = &channel->napi_str; @@ -466,7 +467,8 @@ static void efx_rx_packet_lro(struct efx_channel *channel, skb->len = rx_buf->len; skb->data_len = rx_buf->len; skb->truesize += rx_buf->len; - skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->ip_summed = + checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE; napi_gro_frags(napi); @@ -475,6 +477,7 @@ out: rx_buf->page = NULL; } else { EFX_BUG_ON_PARANOID(!rx_buf->skb); + EFX_BUG_ON_PARANOID(!checksummed); napi_gro_receive(napi, rx_buf->skb); rx_buf->skb = NULL; @@ -570,7 +573,7 @@ void __efx_rx_packet(struct efx_channel *channel, } if (likely(checksummed || rx_buf->page)) { - efx_rx_packet_lro(channel, rx_buf); + efx_rx_packet_lro(channel, rx_buf, checksummed); goto done; } diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c index f49d0800c1d..528b912a4b0 100644 --- a/drivers/net/sh_eth.c +++ b/drivers/net/sh_eth.c @@ -30,6 +30,7 @@ #include <linux/phy.h> #include <linux/cache.h> #include <linux/io.h> +#include <asm/cacheflush.h> #include "sh_eth.h" diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 2ab5c39f33c..6a10d7ba587 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -4538,6 +4538,8 @@ static int __devinit sky2_probe(struct pci_dev *pdev, goto err_out_free_netdev; } + netif_carrier_off(dev); + netif_napi_add(dev, &hw->napi, sky2_poll, NAPI_WEIGHT); err = request_irq(pdev->irq, sky2_intr, diff --git a/drivers/net/stmmac/Kconfig b/drivers/net/stmmac/Kconfig new file mode 100644 index 00000000000..35eaa5251d7 --- /dev/null +++ b/drivers/net/stmmac/Kconfig @@ -0,0 +1,53 @@ +config STMMAC_ETH + tristate "STMicroelectronics 10/100/1000 Ethernet driver" + select MII + select PHYLIB + depends on NETDEVICES && CPU_SUBTYPE_ST40 + help + This is the driver for the ST MAC 10/100/1000 on-chip Ethernet + controllers. ST Ethernet IPs are built around a Synopsys IP Core. + +if STMMAC_ETH + +config STMMAC_DA + bool "STMMAC DMA arbitration scheme" + default n + help + Selecting this option, rx has priority over Tx (only for Giga + Ethernet device). + By default, the DMA arbitration scheme is based on Round-robin + (rx:tx priority is 1:1). + +config STMMAC_DUAL_MAC + bool "STMMAC: dual mac support (EXPERIMENTAL)" + default n + depends on EXPERIMENTAL && STMMAC_ETH && !STMMAC_TIMER + help + Some ST SoCs (for example the stx7141 and stx7200c2) have two + Ethernet Controllers. This option turns on the second Ethernet + device on this kind of platforms. + +config STMMAC_TIMER + bool "STMMAC Timer optimisation" + default n + help + Use an external timer for mitigating the number of network + interrupts. 
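The STMMAC_TIMER option above follows the usual Kconfig pattern: the selected symbol becomes a CONFIG_STMMAC_TIMER preprocessor define and, as the Makefile further down shows, a conditional object (stmmac_timer.o), so the mitigation path is compiled in or out around that guard. A minimal sketch of the guard pattern only, under the assumption stated in the comments; every "example_" identifier is a hypothetical placeholder, not the patch's real stmmac_timer API:

/*
 * Sketch of how a Kconfig symbol such as CONFIG_STMMAC_TIMER is
 * typically consumed in driver code.  All "example_" names below are
 * hypothetical placeholders for illustration.
 */
struct example_timer {
	unsigned int freq_hz;
	int armed;
};

/* Hypothetical helper standing in for the external-timer code. */
static void example_timer_arm(struct example_timer *t, unsigned int hz)
{
	t->freq_hz = hz;
	t->armed = 1;
}

#ifdef CONFIG_STMMAC_TIMER
/* Mitigation enabled: RX/TX completions are serviced when the external
 * timer (TMU channel 2 or the RTC, per the choice below) fires, instead
 * of taking one interrupt per received frame. */
static void example_rx_mitigation_init(struct example_timer *t)
{
	example_timer_arm(t, 100);	/* e.g. poll the rings at 100 Hz */
}
#else
/* Mitigation compiled out: the driver keeps per-frame interrupts. */
static void example_rx_mitigation_init(struct example_timer *t)
{
	(void)t;
}
#endif
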
+ +choice + prompt "Select Timer device" + depends on STMMAC_TIMER + +config STMMAC_TMU_TIMER + bool "TMU channel 2" + depends on CPU_SH4 + help + +config STMMAC_RTC_TIMER + bool "Real time clock" + depends on RTC_CLASS + help + +endchoice + +endif diff --git a/drivers/net/stmmac/Makefile b/drivers/net/stmmac/Makefile new file mode 100644 index 00000000000..b2d7a5564df --- /dev/null +++ b/drivers/net/stmmac/Makefile @@ -0,0 +1,4 @@ +obj-$(CONFIG_STMMAC_ETH) += stmmac.o +stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o +stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \ + mac100.o gmac.o $(stmmac-y) diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h new file mode 100644 index 00000000000..e49e5188e88 --- /dev/null +++ b/drivers/net/stmmac/common.h @@ -0,0 +1,330 @@ +/******************************************************************************* + STMMAC Common Header File + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> +*******************************************************************************/ + +#include "descs.h" +#include <linux/io.h> + +/* ********************************************* + DMA CRS Control and Status Register Mapping + * *********************************************/ +#define DMA_BUS_MODE 0x00001000 /* Bus Mode */ +#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */ +#define DMA_RCV_POLL_DEMAND 0x00001008 /* Received Poll Demand */ +#define DMA_RCV_BASE_ADDR 0x0000100c /* Receive List Base */ +#define DMA_TX_BASE_ADDR 0x00001010 /* Transmit List Base */ +#define DMA_STATUS 0x00001014 /* Status Register */ +#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */ +#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */ +#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */ +#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */ +#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */ + +/* ******************************** + DMA Control register defines + * ********************************/ +#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */ +#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */ + +/* ************************************** + DMA Interrupt Enable register defines + * **************************************/ +/**** NORMAL INTERRUPT ****/ +#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */ +#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */ +#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavailable */ +#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */ +#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */ + +#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \ + DMA_INTR_ENA_TIE) + +/**** ABNORMAL INTERRUPT 
****/ +#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */ +#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */ +#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */ +#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */ +#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */ +#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */ +#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */ +#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */ +#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */ +#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */ + +#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \ + DMA_INTR_ENA_UNE) + +/* DMA default interrupt mask */ +#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL) + +/* **************************** + * DMA Status register defines + * ****************************/ +#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */ +#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */ +#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int. */ +#define DMA_STATUS_GMI 0x08000000 +#define DMA_STATUS_GLI 0x04000000 +#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */ +#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */ +#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */ +#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */ +#define DMA_STATUS_TS_SHIFT 20 +#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */ +#define DMA_STATUS_RS_SHIFT 17 +#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */ +#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */ +#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */ +#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */ +#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */ +#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */ +#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */ +#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */ +#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */ +#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */ +#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */ +#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */ +#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */ +#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */ +#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */ + +/* Other defines */ +#define HASH_TABLE_SIZE 64 +#define PAUSE_TIME 0x200 + +/* Flow Control defines */ +#define FLOW_OFF 0 +#define FLOW_RX 1 +#define FLOW_TX 2 +#define FLOW_AUTO (FLOW_TX | FLOW_RX) + +/* DMA STORE-AND-FORWARD Operation Mode */ +#define SF_DMA_MODE 1 + +#define HW_CSUM 1 +#define NO_HW_CSUM 0 + +/* GMAC TX FIFO is 8K, Rx FIFO is 16K */ +#define BUF_SIZE_16KiB 16384 +#define BUF_SIZE_8KiB 8192 +#define BUF_SIZE_4KiB 4096 +#define BUF_SIZE_2KiB 2048 + +/* Power Down and WOL */ +#define PMT_NOT_SUPPORTED 0 +#define PMT_SUPPORTED 1 + +/* Common MAC defines */ +#define MAC_CTRL_REG 0x00000000 /* MAC Control */ +#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */ +#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */ + +/* MAC Management Counters register */ +#define MMC_CONTROL 0x00000100 /* MMC Control */ +#define MMC_HIGH_INTR 0x00000104 /* MMC High Interrupt */ +#define MMC_LOW_INTR 0x00000108 /* MMC Low Interrupt */ +#define MMC_HIGH_INTR_MASK 0x0000010c /* MMC High Interrupt Mask */ +#define 
MMC_LOW_INTR_MASK 0x00000110 /* MMC Low Interrupt Mask */ + +#define MMC_CONTROL_MAX_FRM_MASK 0x0003ff8 /* Maximum Frame Size */ +#define MMC_CONTROL_MAX_FRM_SHIFT 3 +#define MMC_CONTROL_MAX_FRAME 0x7FF + +struct stmmac_extra_stats { + /* Transmit errors */ + unsigned long tx_underflow ____cacheline_aligned; + unsigned long tx_carrier; + unsigned long tx_losscarrier; + unsigned long tx_heartbeat; + unsigned long tx_deferred; + unsigned long tx_vlan; + unsigned long tx_jabber; + unsigned long tx_frame_flushed; + unsigned long tx_payload_error; + unsigned long tx_ip_header_error; + /* Receive errors */ + unsigned long rx_desc; + unsigned long rx_partial; + unsigned long rx_runt; + unsigned long rx_toolong; + unsigned long rx_collision; + unsigned long rx_crc; + unsigned long rx_lenght; + unsigned long rx_mii; + unsigned long rx_multicast; + unsigned long rx_gmac_overflow; + unsigned long rx_watchdog; + unsigned long da_rx_filter_fail; + unsigned long sa_rx_filter_fail; + unsigned long rx_missed_cntr; + unsigned long rx_overflow_cntr; + unsigned long rx_vlan; + /* Tx/Rx IRQ errors */ + unsigned long tx_undeflow_irq; + unsigned long tx_process_stopped_irq; + unsigned long tx_jabber_irq; + unsigned long rx_overflow_irq; + unsigned long rx_buf_unav_irq; + unsigned long rx_process_stopped_irq; + unsigned long rx_watchdog_irq; + unsigned long tx_early_irq; + unsigned long fatal_bus_error_irq; + /* Extra info */ + unsigned long threshold; + unsigned long tx_pkt_n; + unsigned long rx_pkt_n; + unsigned long poll_n; + unsigned long sched_timer_n; + unsigned long normal_irq_n; +}; + +/* GMAC core can compute the checksums in HW. */ +enum rx_frame_status { + good_frame = 0, + discard_frame = 1, + csum_none = 2, +}; + +static inline void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6], + unsigned int high, unsigned int low) +{ + unsigned long data; + + data = (addr[5] << 8) | addr[4]; + writel(data, ioaddr + high); + data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; + writel(data, ioaddr + low); + + return; +} + +static inline void stmmac_get_mac_addr(unsigned long ioaddr, + unsigned char *addr, unsigned int high, + unsigned int low) +{ + unsigned int hi_addr, lo_addr; + + /* Read the MAC address from the hardware */ + hi_addr = readl(ioaddr + high); + lo_addr = readl(ioaddr + low); + + /* Extract the MAC address from the high and low words */ + addr[0] = lo_addr & 0xff; + addr[1] = (lo_addr >> 8) & 0xff; + addr[2] = (lo_addr >> 16) & 0xff; + addr[3] = (lo_addr >> 24) & 0xff; + addr[4] = hi_addr & 0xff; + addr[5] = (hi_addr >> 8) & 0xff; + + return; +} + +struct stmmac_ops { + /* MAC core initialization */ + void (*core_init) (unsigned long ioaddr) ____cacheline_aligned; + /* DMA core initialization */ + int (*dma_init) (unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx); + /* Dump MAC registers */ + void (*dump_mac_regs) (unsigned long ioaddr); + /* Dump DMA registers */ + void (*dump_dma_regs) (unsigned long ioaddr); + /* Set tx/rx threshold in the csr6 register + * An invalid value enables the store-and-forward mode */ + void (*dma_mode) (unsigned long ioaddr, int txmode, int rxmode); + /* To track extra statistic (if supported) */ + void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x, + unsigned long ioaddr); + /* RX descriptor ring initialization */ + void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size, + int disable_rx_ic); + /* TX descriptor ring initialization */ + void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size); + + /* 
Invoked by the xmit function to prepare the tx descriptor */ + void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len, + int csum_flag); + /* Set/get the owner of the descriptor */ + void (*set_tx_owner) (struct dma_desc *p); + int (*get_tx_owner) (struct dma_desc *p); + /* Invoked by the xmit function to close the tx descriptor */ + void (*close_tx_desc) (struct dma_desc *p); + /* Clean the tx descriptor as soon as the tx irq is received */ + void (*release_tx_desc) (struct dma_desc *p); + /* Clear interrupt on tx frame completion. When this bit is + * set an interrupt happens as soon as the frame is transmitted */ + void (*clear_tx_ic) (struct dma_desc *p); + /* Last tx segment reports the transmit status */ + int (*get_tx_ls) (struct dma_desc *p); + /* Return the transmit status looking at the TDES1 */ + int (*tx_status) (void *data, struct stmmac_extra_stats *x, + struct dma_desc *p, unsigned long ioaddr); + /* Get the buffer size from the descriptor */ + int (*get_tx_len) (struct dma_desc *p); + /* Handle extra events on specific interrupts hw dependent */ + void (*host_irq_status) (unsigned long ioaddr); + int (*get_rx_owner) (struct dma_desc *p); + void (*set_rx_owner) (struct dma_desc *p); + /* Get the receive frame size */ + int (*get_rx_frame_len) (struct dma_desc *p); + /* Return the reception status looking at the RDES1 */ + int (*rx_status) (void *data, struct stmmac_extra_stats *x, + struct dma_desc *p); + /* Multicast filter setting */ + void (*set_filter) (struct net_device *dev); + /* Flow control setting */ + void (*flow_ctrl) (unsigned long ioaddr, unsigned int duplex, + unsigned int fc, unsigned int pause_time); + /* Set power management mode (e.g. magic frame) */ + void (*pmt) (unsigned long ioaddr, unsigned long mode); + /* Set/Get Unicast MAC addresses */ + void (*set_umac_addr) (unsigned long ioaddr, unsigned char *addr, + unsigned int reg_n); + void (*get_umac_addr) (unsigned long ioaddr, unsigned char *addr, + unsigned int reg_n); +}; + +struct mac_link { + int port; + int duplex; + int speed; +}; + +struct mii_regs { + unsigned int addr; /* MII Address */ + unsigned int data; /* MII Data */ +}; + +struct hw_cap { + unsigned int version; /* Core Version register (GMAC) */ + unsigned int pmt; /* Power-Down mode (GMAC) */ + struct mac_link link; + struct mii_regs mii; +}; + +struct mac_device_info { + struct hw_cap hw; + struct stmmac_ops *ops; +}; + +struct mac_device_info *gmac_setup(unsigned long addr); +struct mac_device_info *mac100_setup(unsigned long addr); diff --git a/drivers/net/stmmac/descs.h b/drivers/net/stmmac/descs.h new file mode 100644 index 00000000000..6d2a0b2f5e5 --- /dev/null +++ b/drivers/net/stmmac/descs.h @@ -0,0 +1,163 @@ +/******************************************************************************* + Header File to describe the DMA descriptors + Use enhanced descriptors in case of GMAC Cores. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
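The stmmac_ops table defined in common.h above is the seam between the common driver core and the two MAC back ends (mac100.c and gmac.c): the core only ever touches hardware and descriptors through these callbacks. A minimal sketch of a single-buffer transmit step written against that interface; it assumes the struct dma_desc, struct stmmac_ops and struct mac_device_info definitions quoted above, and example_queue_one_frame() itself is an illustration, not code from the patch:

/*
 * Sketch only: queue one frame through the stmmac_ops callbacks.
 * Assumes the stmmac common.h definitions shown above.
 */
static void example_queue_one_frame(struct mac_device_info *mac,
				    struct dma_desc *desc,
				    int len, int use_hw_csum)
{
	/* Fill TDES0/TDES1 for a single-buffer frame (first segment). */
	mac->ops->prepare_tx_desc(desc, 1, len, use_hw_csum);
	/* Mark it as the last segment and request a completion IRQ. */
	mac->ops->close_tx_desc(desc);
	/* Hand the descriptor to the DMA engine only once it is fully
	 * initialised: the own bit must be set last. */
	mac->ops->set_tx_owner(desc);
}

The indirection exists because the two cores use different descriptor layouts, which is exactly what descs.h below encodes: the 10/100 MAC uses the plain tx/rx views of des01, while the GMAC uses the enhanced etx/erx views.
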
+ + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> +*******************************************************************************/ +struct dma_desc { + /* Receive descriptor */ + union { + struct { + /* RDES0 */ + u32 reserved1:1; + u32 crc_error:1; + u32 dribbling:1; + u32 mii_error:1; + u32 receive_watchdog:1; + u32 frame_type:1; + u32 collision:1; + u32 frame_too_long:1; + u32 last_descriptor:1; + u32 first_descriptor:1; + u32 multicast_frame:1; + u32 run_frame:1; + u32 length_error:1; + u32 partial_frame_error:1; + u32 descriptor_error:1; + u32 error_summary:1; + u32 frame_length:14; + u32 filtering_fail:1; + u32 own:1; + /* RDES1 */ + u32 buffer1_size:11; + u32 buffer2_size:11; + u32 reserved2:2; + u32 second_address_chained:1; + u32 end_ring:1; + u32 reserved3:5; + u32 disable_ic:1; + } rx; + struct { + /* RDES0 */ + u32 payload_csum_error:1; + u32 crc_error:1; + u32 dribbling:1; + u32 error_gmii:1; + u32 receive_watchdog:1; + u32 frame_type:1; + u32 late_collision:1; + u32 ipc_csum_error:1; + u32 last_descriptor:1; + u32 first_descriptor:1; + u32 vlan_tag:1; + u32 overflow_error:1; + u32 length_error:1; + u32 sa_filter_fail:1; + u32 descriptor_error:1; + u32 error_summary:1; + u32 frame_length:14; + u32 da_filter_fail:1; + u32 own:1; + /* RDES1 */ + u32 buffer1_size:13; + u32 reserved1:1; + u32 second_address_chained:1; + u32 end_ring:1; + u32 buffer2_size:13; + u32 reserved2:2; + u32 disable_ic:1; + } erx; /* -- enhanced -- */ + + /* Transmit descriptor */ + struct { + /* TDES0 */ + u32 deferred:1; + u32 underflow_error:1; + u32 excessive_deferral:1; + u32 collision_count:4; + u32 heartbeat_fail:1; + u32 excessive_collisions:1; + u32 late_collision:1; + u32 no_carrier:1; + u32 loss_carrier:1; + u32 reserved1:3; + u32 error_summary:1; + u32 reserved2:15; + u32 own:1; + /* TDES1 */ + u32 buffer1_size:11; + u32 buffer2_size:11; + u32 reserved3:1; + u32 disable_padding:1; + u32 second_address_chained:1; + u32 end_ring:1; + u32 crc_disable:1; + u32 reserved4:2; + u32 first_segment:1; + u32 last_segment:1; + u32 interrupt:1; + } tx; + struct { + /* TDES0 */ + u32 deferred:1; + u32 underflow_error:1; + u32 excessive_deferral:1; + u32 collision_count:4; + u32 vlan_frame:1; + u32 excessive_collisions:1; + u32 late_collision:1; + u32 no_carrier:1; + u32 loss_carrier:1; + u32 payload_error:1; + u32 frame_flushed:1; + u32 jabber_timeout:1; + u32 error_summary:1; + u32 ip_header_error:1; + u32 time_stamp_status:1; + u32 reserved1:2; + u32 second_address_chained:1; + u32 end_ring:1; + u32 checksum_insertion:2; + u32 reserved2:1; + u32 time_stamp_enable:1; + u32 disable_padding:1; + u32 crc_disable:1; + u32 first_segment:1; + u32 last_segment:1; + u32 interrupt:1; + u32 own:1; + /* TDES1 */ + u32 buffer1_size:13; + u32 reserved3:3; + u32 buffer2_size:13; + u32 reserved4:3; + } etx; /* -- enhanced -- */ + } des01; + unsigned int des2; + unsigned int des3; +}; + +/* Transmit checksum insertion control */ +enum tdes_csum_insertion { + cic_disabled = 0, /* Checksum Insertion Control */ + cic_only_ip = 1, /* Only IP header */ + cic_no_pseudoheader = 2, /* IP header but pseudoheader + * is not calculated */ + cic_full = 3, /* IP header and pseudoheader */ +}; diff --git 
a/drivers/net/stmmac/gmac.c b/drivers/net/stmmac/gmac.c new file mode 100644 index 00000000000..b624bb5bae0 --- /dev/null +++ b/drivers/net/stmmac/gmac.c @@ -0,0 +1,693 @@ +/******************************************************************************* + This is the driver for the GMAC on-chip Ethernet controller for ST SoCs. + DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for + developing this code. + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> +*******************************************************************************/ + +#include <linux/netdevice.h> +#include <linux/crc32.h> +#include <linux/mii.h> +#include <linux/phy.h> + +#include "stmmac.h" +#include "gmac.h" + +#undef GMAC_DEBUG +/*#define GMAC_DEBUG*/ +#undef FRAME_FILTER_DEBUG +/*#define FRAME_FILTER_DEBUG*/ +#ifdef GMAC_DEBUG +#define DBG(fmt, args...) printk(fmt, ## args) +#else +#define DBG(fmt, args...) do { } while (0) +#endif + +static void gmac_dump_regs(unsigned long ioaddr) +{ + int i; + pr_info("\t----------------------------------------------\n" + "\t GMAC registers (base addr = 0x%8x)\n" + "\t----------------------------------------------\n", + (unsigned int)ioaddr); + + for (i = 0; i < 55; i++) { + int offset = i * 4; + pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i, + offset, readl(ioaddr + offset)); + } + return; +} + +static int gmac_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx) +{ + u32 value = readl(ioaddr + DMA_BUS_MODE); + /* DMA SW reset */ + value |= DMA_BUS_MODE_SFT_RESET; + writel(value, ioaddr + DMA_BUS_MODE); + do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)); + + value = /* DMA_BUS_MODE_FB | */ DMA_BUS_MODE_4PBL | + ((pbl << DMA_BUS_MODE_PBL_SHIFT) | + (pbl << DMA_BUS_MODE_RPBL_SHIFT)); + +#ifdef CONFIG_STMMAC_DA + value |= DMA_BUS_MODE_DA; /* Rx has priority over tx */ +#endif + writel(value, ioaddr + DMA_BUS_MODE); + + /* Mask interrupts by writing to CSR7 */ + writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); + + /* The base address of the RX/TX descriptor lists must be written into + * DMA CSR3 and CSR4, respectively. 
*/ + writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR); + writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR); + + return 0; +} + +/* Transmit FIFO flush operation */ +static void gmac_flush_tx_fifo(unsigned long ioaddr) +{ + u32 csr6 = readl(ioaddr + DMA_CONTROL); + writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL); + + do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF)); +} + +static void gmac_dma_operation_mode(unsigned long ioaddr, int txmode, + int rxmode) +{ + u32 csr6 = readl(ioaddr + DMA_CONTROL); + + if (txmode == SF_DMA_MODE) { + DBG(KERN_DEBUG "GMAC: enabling TX store and forward mode\n"); + /* Transmit COE type 2 cannot be done in cut-through mode. */ + csr6 |= DMA_CONTROL_TSF; + /* Operating on second frame increase the performance + * especially when transmit store-and-forward is used.*/ + csr6 |= DMA_CONTROL_OSF; + } else { + DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode" + " (threshold = %d)\n", txmode); + csr6 &= ~DMA_CONTROL_TSF; + csr6 &= DMA_CONTROL_TC_TX_MASK; + /* Set the transmit threashold */ + if (txmode <= 32) + csr6 |= DMA_CONTROL_TTC_32; + else if (txmode <= 64) + csr6 |= DMA_CONTROL_TTC_64; + else if (txmode <= 128) + csr6 |= DMA_CONTROL_TTC_128; + else if (txmode <= 192) + csr6 |= DMA_CONTROL_TTC_192; + else + csr6 |= DMA_CONTROL_TTC_256; + } + + if (rxmode == SF_DMA_MODE) { + DBG(KERN_DEBUG "GMAC: enabling RX store and forward mode\n"); + csr6 |= DMA_CONTROL_RSF; + } else { + DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode" + " (threshold = %d)\n", rxmode); + csr6 &= ~DMA_CONTROL_RSF; + csr6 &= DMA_CONTROL_TC_RX_MASK; + if (rxmode <= 32) + csr6 |= DMA_CONTROL_RTC_32; + else if (rxmode <= 64) + csr6 |= DMA_CONTROL_RTC_64; + else if (rxmode <= 96) + csr6 |= DMA_CONTROL_RTC_96; + else + csr6 |= DMA_CONTROL_RTC_128; + } + + writel(csr6, ioaddr + DMA_CONTROL); + return; +} + +/* Not yet implemented --- no RMON module */ +static void gmac_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x, + unsigned long ioaddr) +{ + return; +} + +static void gmac_dump_dma_regs(unsigned long ioaddr) +{ + int i; + pr_info(" DMA registers\n"); + for (i = 0; i < 22; i++) { + if ((i < 9) || (i > 17)) { + int offset = i * 4; + pr_err("\t Reg No. %d (offset 0x%x): 0x%08x\n", i, + (DMA_BUS_MODE + offset), + readl(ioaddr + DMA_BUS_MODE + offset)); + } + } + return; +} + +static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p, unsigned long ioaddr) +{ + int ret = 0; + struct net_device_stats *stats = (struct net_device_stats *)data; + + if (unlikely(p->des01.etx.error_summary)) { + DBG(KERN_ERR "GMAC TX error... 
0x%08x\n", p->des01.etx); + if (unlikely(p->des01.etx.jabber_timeout)) { + DBG(KERN_ERR "\tjabber_timeout error\n"); + x->tx_jabber++; + } + + if (unlikely(p->des01.etx.frame_flushed)) { + DBG(KERN_ERR "\tframe_flushed error\n"); + x->tx_frame_flushed++; + gmac_flush_tx_fifo(ioaddr); + } + + if (unlikely(p->des01.etx.loss_carrier)) { + DBG(KERN_ERR "\tloss_carrier error\n"); + x->tx_losscarrier++; + stats->tx_carrier_errors++; + } + if (unlikely(p->des01.etx.no_carrier)) { + DBG(KERN_ERR "\tno_carrier error\n"); + x->tx_carrier++; + stats->tx_carrier_errors++; + } + if (unlikely(p->des01.etx.late_collision)) { + DBG(KERN_ERR "\tlate_collision error\n"); + stats->collisions += p->des01.etx.collision_count; + } + if (unlikely(p->des01.etx.excessive_collisions)) { + DBG(KERN_ERR "\texcessive_collisions\n"); + stats->collisions += p->des01.etx.collision_count; + } + if (unlikely(p->des01.etx.excessive_deferral)) { + DBG(KERN_INFO "\texcessive tx_deferral\n"); + x->tx_deferred++; + } + + if (unlikely(p->des01.etx.underflow_error)) { + DBG(KERN_ERR "\tunderflow error\n"); + gmac_flush_tx_fifo(ioaddr); + x->tx_underflow++; + } + + if (unlikely(p->des01.etx.ip_header_error)) { + DBG(KERN_ERR "\tTX IP header csum error\n"); + x->tx_ip_header_error++; + } + + if (unlikely(p->des01.etx.payload_error)) { + DBG(KERN_ERR "\tAddr/Payload csum error\n"); + x->tx_payload_error++; + gmac_flush_tx_fifo(ioaddr); + } + + ret = -1; + } + + if (unlikely(p->des01.etx.deferred)) { + DBG(KERN_INFO "GMAC TX status: tx deferred\n"); + x->tx_deferred++; + } +#ifdef STMMAC_VLAN_TAG_USED + if (p->des01.etx.vlan_frame) { + DBG(KERN_INFO "GMAC TX status: VLAN frame\n"); + x->tx_vlan++; + } +#endif + + return ret; +} + +static int gmac_get_tx_len(struct dma_desc *p) +{ + return p->des01.etx.buffer1_size; +} + +static int gmac_coe_rdes0(int ipc_err, int type, int payload_err) +{ + int ret = good_frame; + u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7; + + /* bits 5 7 0 | Frame status + * ---------------------------------------------------------- + * 0 0 0 | IEEE 802.3 Type frame (lenght < 1536 octects) + * 1 0 0 | IPv4/6 No CSUM errorS. + * 1 0 1 | IPv4/6 CSUM PAYLOAD error + * 1 1 0 | IPv4/6 CSUM IP HR error + * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errorS + * 0 0 1 | IPv4/6 unsupported IP PAYLOAD + * 0 1 1 | COE bypassed.. no IPv4/6 frame + * 0 1 0 | Reserved. + */ + if (status == 0x0) { + DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n"); + ret = good_frame; + } else if (status == 0x4) { + DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n"); + ret = good_frame; + } else if (status == 0x5) { + DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n"); + ret = csum_none; + } else if (status == 0x6) { + DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n"); + ret = csum_none; + } else if (status == 0x7) { + DBG(KERN_ERR + "RX Des0 status: IPv4/6 Header and Payload Error.\n"); + ret = csum_none; + } else if (status == 0x1) { + DBG(KERN_ERR + "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n"); + ret = discard_frame; + } else if (status == 0x3) { + DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n"); + ret = discard_frame; + } + return ret; +} + +static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p) +{ + int ret = good_frame; + struct net_device_stats *stats = (struct net_device_stats *)data; + + if (unlikely(p->des01.erx.error_summary)) { + DBG(KERN_ERR "GMAC RX Error Summary... 
0x%08x\n", p->des01.erx); + if (unlikely(p->des01.erx.descriptor_error)) { + DBG(KERN_ERR "\tdescriptor error\n"); + x->rx_desc++; + stats->rx_length_errors++; + } + if (unlikely(p->des01.erx.overflow_error)) { + DBG(KERN_ERR "\toverflow error\n"); + x->rx_gmac_overflow++; + } + + if (unlikely(p->des01.erx.ipc_csum_error)) + DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n"); + + if (unlikely(p->des01.erx.late_collision)) { + DBG(KERN_ERR "\tlate_collision error\n"); + stats->collisions++; + stats->collisions++; + } + if (unlikely(p->des01.erx.receive_watchdog)) { + DBG(KERN_ERR "\treceive_watchdog error\n"); + x->rx_watchdog++; + } + if (unlikely(p->des01.erx.error_gmii)) { + DBG(KERN_ERR "\tReceive Error\n"); + x->rx_mii++; + } + if (unlikely(p->des01.erx.crc_error)) { + DBG(KERN_ERR "\tCRC error\n"); + x->rx_crc++; + stats->rx_crc_errors++; + } + ret = discard_frame; + } + + /* After a payload csum error, the ES bit is set. + * It doesn't match with the information reported into the databook. + * At any rate, we need to understand if the CSUM hw computation is ok + * and report this info to the upper layers. */ + ret = gmac_coe_rdes0(p->des01.erx.ipc_csum_error, + p->des01.erx.frame_type, p->des01.erx.payload_csum_error); + + if (unlikely(p->des01.erx.dribbling)) { + DBG(KERN_ERR "GMAC RX: dribbling error\n"); + ret = discard_frame; + } + if (unlikely(p->des01.erx.sa_filter_fail)) { + DBG(KERN_ERR "GMAC RX : Source Address filter fail\n"); + x->sa_rx_filter_fail++; + ret = discard_frame; + } + if (unlikely(p->des01.erx.da_filter_fail)) { + DBG(KERN_ERR "GMAC RX : Destination Address filter fail\n"); + x->da_rx_filter_fail++; + ret = discard_frame; + } + if (unlikely(p->des01.erx.length_error)) { + DBG(KERN_ERR "GMAC RX: length_error error\n"); + x->rx_lenght++; + ret = discard_frame; + } +#ifdef STMMAC_VLAN_TAG_USED + if (p->des01.erx.vlan_tag) { + DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n"); + x->rx_vlan++; + } +#endif + return ret; +} + +static void gmac_irq_status(unsigned long ioaddr) +{ + u32 intr_status = readl(ioaddr + GMAC_INT_STATUS); + + /* Not used events (e.g. MMC interrupts) are not handled. */ + if ((intr_status & mmc_tx_irq)) + DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n", + readl(ioaddr + GMAC_MMC_TX_INTR)); + if (unlikely(intr_status & mmc_rx_irq)) + DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n", + readl(ioaddr + GMAC_MMC_RX_INTR)); + if (unlikely(intr_status & mmc_rx_csum_offload_irq)) + DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n", + readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD)); + if (unlikely(intr_status & pmt_irq)) { + DBG(KERN_DEBUG "GMAC: received Magic frame\n"); + /* clear the PMT bits 5 and 6 by reading the PMT + * status register. 
*/ + readl(ioaddr + GMAC_PMT); + } + + return; +} + +static void gmac_core_init(unsigned long ioaddr) +{ + u32 value = readl(ioaddr + GMAC_CONTROL); + value |= GMAC_CORE_INIT; + writel(value, ioaddr + GMAC_CONTROL); + + /* STBus Bridge Configuration */ + /*writel(0xc5608, ioaddr + 0x00007000);*/ + + /* Freeze MMC counters */ + writel(0x8, ioaddr + GMAC_MMC_CTRL); + /* Mask GMAC interrupts */ + writel(0x207, ioaddr + GMAC_INT_MASK); + +#ifdef STMMAC_VLAN_TAG_USED + /* Tag detection without filtering */ + writel(0x0, ioaddr + GMAC_VLAN_TAG); +#endif + return; +} + +static void gmac_set_umac_addr(unsigned long ioaddr, unsigned char *addr, + unsigned int reg_n) +{ + stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n), + GMAC_ADDR_LOW(reg_n)); +} + +static void gmac_get_umac_addr(unsigned long ioaddr, unsigned char *addr, + unsigned int reg_n) +{ + stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n), + GMAC_ADDR_LOW(reg_n)); +} + +static void gmac_set_filter(struct net_device *dev) +{ + unsigned long ioaddr = dev->base_addr; + unsigned int value = 0; + + DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n", + __func__, dev->mc_count, dev->uc_count); + + if (dev->flags & IFF_PROMISC) + value = GMAC_FRAME_FILTER_PR; + else if ((dev->mc_count > HASH_TABLE_SIZE) + || (dev->flags & IFF_ALLMULTI)) { + value = GMAC_FRAME_FILTER_PM; /* pass all multi */ + writel(0xffffffff, ioaddr + GMAC_HASH_HIGH); + writel(0xffffffff, ioaddr + GMAC_HASH_LOW); + } else if (dev->mc_count > 0) { + int i; + u32 mc_filter[2]; + struct dev_mc_list *mclist; + + /* Hash filter for multicast */ + value = GMAC_FRAME_FILTER_HMC; + + memset(mc_filter, 0, sizeof(mc_filter)); + for (i = 0, mclist = dev->mc_list; + mclist && i < dev->mc_count; i++, mclist = mclist->next) { + /* The upper 6 bits of the calculated CRC are used to + index the contens of the hash table */ + int bit_nr = + bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26; + /* The most significant bit determines the register to + * use (H/L) while the other 5 bits determine the bit + * within the register. 
*/ + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + } + writel(mc_filter[0], ioaddr + GMAC_HASH_LOW); + writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH); + } + + /* Handle multiple unicast addresses (perfect filtering)*/ + if (dev->uc_count > GMAC_MAX_UNICAST_ADDRESSES) + /* Switch to promiscuous mode is more than 16 addrs + are required */ + value |= GMAC_FRAME_FILTER_PR; + else { + int i; + struct dev_addr_list *uc_ptr = dev->uc_list; + + for (i = 0; i < dev->uc_count; i++) { + gmac_set_umac_addr(ioaddr, uc_ptr->da_addr, + i + 1); + + DBG(KERN_INFO "\t%d " + "- Unicast addr %02x:%02x:%02x:%02x:%02x:" + "%02x\n", i + 1, + uc_ptr->da_addr[0], uc_ptr->da_addr[1], + uc_ptr->da_addr[2], uc_ptr->da_addr[3], + uc_ptr->da_addr[4], uc_ptr->da_addr[5]); + uc_ptr = uc_ptr->next; + } + } + +#ifdef FRAME_FILTER_DEBUG + /* Enable Receive all mode (to debug filtering_fail errors) */ + value |= GMAC_FRAME_FILTER_RA; +#endif + writel(value, ioaddr + GMAC_FRAME_FILTER); + + DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: " + "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER), + readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW)); + + return; +} + +static void gmac_flow_ctrl(unsigned long ioaddr, unsigned int duplex, + unsigned int fc, unsigned int pause_time) +{ + unsigned int flow = 0; + + DBG(KERN_DEBUG "GMAC Flow-Control:\n"); + if (fc & FLOW_RX) { + DBG(KERN_DEBUG "\tReceive Flow-Control ON\n"); + flow |= GMAC_FLOW_CTRL_RFE; + } + if (fc & FLOW_TX) { + DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n"); + flow |= GMAC_FLOW_CTRL_TFE; + } + + if (duplex) { + DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time); + flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT); + } + + writel(flow, ioaddr + GMAC_FLOW_CTRL); + return; +} + +static void gmac_pmt(unsigned long ioaddr, unsigned long mode) +{ + unsigned int pmt = 0; + + if (mode == WAKE_MAGIC) { + DBG(KERN_DEBUG "GMAC: WOL Magic frame\n"); + pmt |= power_down | magic_pkt_en; + } else if (mode == WAKE_UCAST) { + DBG(KERN_DEBUG "GMAC: WOL on global unicast\n"); + pmt |= global_unicast; + } + + writel(pmt, ioaddr + GMAC_PMT); + return; +} + +static void gmac_init_rx_desc(struct dma_desc *p, unsigned int ring_size, + int disable_rx_ic) +{ + int i; + for (i = 0; i < ring_size; i++) { + p->des01.erx.own = 1; + p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1; + /* To support jumbo frames */ + p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1; + if (i == ring_size - 1) + p->des01.erx.end_ring = 1; + if (disable_rx_ic) + p->des01.erx.disable_ic = 1; + p++; + } + return; +} + +static void gmac_init_tx_desc(struct dma_desc *p, unsigned int ring_size) +{ + int i; + + for (i = 0; i < ring_size; i++) { + p->des01.etx.own = 0; + if (i == ring_size - 1) + p->des01.etx.end_ring = 1; + p++; + } + + return; +} + +static int gmac_get_tx_owner(struct dma_desc *p) +{ + return p->des01.etx.own; +} + +static int gmac_get_rx_owner(struct dma_desc *p) +{ + return p->des01.erx.own; +} + +static void gmac_set_tx_owner(struct dma_desc *p) +{ + p->des01.etx.own = 1; +} + +static void gmac_set_rx_owner(struct dma_desc *p) +{ + p->des01.erx.own = 1; +} + +static int gmac_get_tx_ls(struct dma_desc *p) +{ + return p->des01.etx.last_segment; +} + +static void gmac_release_tx_desc(struct dma_desc *p) +{ + int ter = p->des01.etx.end_ring; + + memset(p, 0, sizeof(struct dma_desc)); + p->des01.etx.end_ring = ter; + + return; +} + +static void gmac_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, + int csum_flag) +{ + p->des01.etx.first_segment = is_fs; + if 
(unlikely(len > BUF_SIZE_4KiB)) { + p->des01.etx.buffer1_size = BUF_SIZE_4KiB; + p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB; + } else { + p->des01.etx.buffer1_size = len; + } + if (likely(csum_flag)) + p->des01.etx.checksum_insertion = cic_full; +} + +static void gmac_clear_tx_ic(struct dma_desc *p) +{ + p->des01.etx.interrupt = 0; +} + +static void gmac_close_tx_desc(struct dma_desc *p) +{ + p->des01.etx.last_segment = 1; + p->des01.etx.interrupt = 1; +} + +static int gmac_get_rx_frame_len(struct dma_desc *p) +{ + return p->des01.erx.frame_length; +} + +struct stmmac_ops gmac_driver = { + .core_init = gmac_core_init, + .dump_mac_regs = gmac_dump_regs, + .dma_init = gmac_dma_init, + .dump_dma_regs = gmac_dump_dma_regs, + .dma_mode = gmac_dma_operation_mode, + .dma_diagnostic_fr = gmac_dma_diagnostic_fr, + .tx_status = gmac_get_tx_frame_status, + .rx_status = gmac_get_rx_frame_status, + .get_tx_len = gmac_get_tx_len, + .set_filter = gmac_set_filter, + .flow_ctrl = gmac_flow_ctrl, + .pmt = gmac_pmt, + .init_rx_desc = gmac_init_rx_desc, + .init_tx_desc = gmac_init_tx_desc, + .get_tx_owner = gmac_get_tx_owner, + .get_rx_owner = gmac_get_rx_owner, + .release_tx_desc = gmac_release_tx_desc, + .prepare_tx_desc = gmac_prepare_tx_desc, + .clear_tx_ic = gmac_clear_tx_ic, + .close_tx_desc = gmac_close_tx_desc, + .get_tx_ls = gmac_get_tx_ls, + .set_tx_owner = gmac_set_tx_owner, + .set_rx_owner = gmac_set_rx_owner, + .get_rx_frame_len = gmac_get_rx_frame_len, + .host_irq_status = gmac_irq_status, + .set_umac_addr = gmac_set_umac_addr, + .get_umac_addr = gmac_get_umac_addr, +}; + +struct mac_device_info *gmac_setup(unsigned long ioaddr) +{ + struct mac_device_info *mac; + u32 uid = readl(ioaddr + GMAC_VERSION); + + pr_info("\tGMAC - user ID: 0x%x, Synopsys ID: 0x%x\n", + ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff)); + + mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); + + mac->ops = &gmac_driver; + mac->hw.pmt = PMT_SUPPORTED; + mac->hw.link.port = GMAC_CONTROL_PS; + mac->hw.link.duplex = GMAC_CONTROL_DM; + mac->hw.link.speed = GMAC_CONTROL_FES; + mac->hw.mii.addr = GMAC_MII_ADDR; + mac->hw.mii.data = GMAC_MII_DATA; + + return mac; +} diff --git a/drivers/net/stmmac/gmac.h b/drivers/net/stmmac/gmac.h new file mode 100644 index 00000000000..684a363120a --- /dev/null +++ b/drivers/net/stmmac/gmac.h @@ -0,0 +1,204 @@ +/******************************************************************************* + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
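The gmac_driver ops table and gmac_setup() above are what the rest of the driver consumes: gmac_setup() allocates a mac_device_info, points it at gmac_driver and records the MII and link-control register layout, after which all hardware access goes through mac->ops. A minimal sketch of how a probe path might attach to this back end, assuming the declarations quoted above and the usual kernel headers; example_hw_attach() is illustrative, the real call sites live in stmmac_main.c outside this hunk:

/*
 * Sketch only: attach to the GMAC back end at probe time.
 * Assumes the common.h/gmac.c definitions shown above.
 */
static int example_hw_attach(unsigned long ioaddr, unsigned char *mac_addr)
{
	struct mac_device_info *mac = gmac_setup(ioaddr);

	if (!mac)
		return -ENOMEM;	/* defensive; gmac_setup() itself does not
				 * check the kzalloc() result */

	/* Program the mandatory core bits (GMAC_CORE_INIT). */
	mac->ops->core_init(ioaddr);
	/* Install the station address into perfect-filter slot 0,
	 * i.e. GMAC_ADDR_HIGH(0)/GMAC_ADDR_LOW(0). */
	mac->ops->set_umac_addr(ioaddr, mac_addr, 0);

	return 0;
}
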
+ + Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> +*******************************************************************************/ + +#define GMAC_CONTROL 0x00000000 /* Configuration */ +#define GMAC_FRAME_FILTER 0x00000004 /* Frame Filter */ +#define GMAC_HASH_HIGH 0x00000008 /* Multicast Hash Table High */ +#define GMAC_HASH_LOW 0x0000000c /* Multicast Hash Table Low */ +#define GMAC_MII_ADDR 0x00000010 /* MII Address */ +#define GMAC_MII_DATA 0x00000014 /* MII Data */ +#define GMAC_FLOW_CTRL 0x00000018 /* Flow Control */ +#define GMAC_VLAN_TAG 0x0000001c /* VLAN Tag */ +#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */ +#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */ + +#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */ +enum gmac_irq_status { + time_stamp_irq = 0x0200, + mmc_rx_csum_offload_irq = 0x0080, + mmc_tx_irq = 0x0040, + mmc_rx_irq = 0x0020, + mmc_irq = 0x0010, + pmt_irq = 0x0008, + pcs_ane_irq = 0x0004, + pcs_link_irq = 0x0002, + rgmii_irq = 0x0001, +}; +#define GMAC_INT_MASK 0x0000003c /* interrupt mask register */ + +/* PMT Control and Status */ +#define GMAC_PMT 0x0000002c +enum power_event { + pointer_reset = 0x80000000, + global_unicast = 0x00000200, + wake_up_rx_frame = 0x00000040, + magic_frame = 0x00000020, + wake_up_frame_en = 0x00000004, + magic_pkt_en = 0x00000002, + power_down = 0x00000001, +}; + +/* GMAC HW ADDR regs */ +#define GMAC_ADDR_HIGH(reg) (0x00000040+(reg * 8)) +#define GMAC_ADDR_LOW(reg) (0x00000044+(reg * 8)) +#define GMAC_MAX_UNICAST_ADDRESSES 16 + +#define GMAC_AN_CTRL 0x000000c0 /* AN control */ +#define GMAC_AN_STATUS 0x000000c4 /* AN status */ +#define GMAC_ANE_ADV 0x000000c8 /* Auto-Neg. Advertisement */ +#define GMAC_ANE_LINK 0x000000cc /* Auto-Neg. link partener ability */ +#define GMAC_ANE_EXP 0x000000d0 /* ANE expansion */ +#define GMAC_TBI 0x000000d4 /* TBI extend status */ +#define GMAC_GMII_STATUS 0x000000d8 /* S/R-GMII status */ + +/* GMAC Configuration defines */ +#define GMAC_CONTROL_TC 0x01000000 /* Transmit Conf. 
in RGMII/SGMII */ +#define GMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on receive */ +#define GMAC_CONTROL_JD 0x00400000 /* Jabber disable */ +#define GMAC_CONTROL_BE 0x00200000 /* Frame Burst Enable */ +#define GMAC_CONTROL_JE 0x00100000 /* Jumbo frame */ +enum inter_frame_gap { + GMAC_CONTROL_IFG_88 = 0x00040000, + GMAC_CONTROL_IFG_80 = 0x00020000, + GMAC_CONTROL_IFG_40 = 0x000e0000, +}; +#define GMAC_CONTROL_DCRS 0x00010000 /* Disable carrier sense during tx */ +#define GMAC_CONTROL_PS 0x00008000 /* Port Select 0:GMI 1:MII */ +#define GMAC_CONTROL_FES 0x00004000 /* Speed 0:10 1:100 */ +#define GMAC_CONTROL_DO 0x00002000 /* Disable Rx Own */ +#define GMAC_CONTROL_LM 0x00001000 /* Loop-back mode */ +#define GMAC_CONTROL_DM 0x00000800 /* Duplex Mode */ +#define GMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */ +#define GMAC_CONTROL_DR 0x00000200 /* Disable Retry */ +#define GMAC_CONTROL_LUD 0x00000100 /* Link up/down */ +#define GMAC_CONTROL_ACS 0x00000080 /* Automatic Pad Stripping */ +#define GMAC_CONTROL_DC 0x00000010 /* Deferral Check */ +#define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */ +#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */ + +#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \ + GMAC_CONTROL_IPC | GMAC_CONTROL_JE | GMAC_CONTROL_BE) + +/* GMAC Frame Filter defines */ +#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */ +#define GMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */ +#define GMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */ +#define GMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */ +#define GMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */ +#define GMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */ +#define GMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */ +#define GMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */ +#define GMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */ +#define GMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */ +/* GMII ADDR defines */ +#define GMAC_MII_ADDR_WRITE 0x00000002 /* MII Write */ +#define GMAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */ +/* GMAC FLOW CTRL defines */ +#define GMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */ +#define GMAC_FLOW_CTRL_PT_SHIFT 16 +#define GMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */ +#define GMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */ +#define GMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... 
*/ + +/*--- DMA BLOCK defines ---*/ +/* DMA Bus Mode register defines */ +#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */ +#define DMA_BUS_MODE_DA 0x00000002 /* Arbitration scheme */ +#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */ +#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */ +/* Programmable burst length (passed through platform)*/ +#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */ +#define DMA_BUS_MODE_PBL_SHIFT 8 + +enum rx_tx_priority_ratio { + double_ratio = 0x00004000, /*2:1 */ + triple_ratio = 0x00008000, /*3:1 */ + quadruple_ratio = 0x0000c000, /*4:1 */ +}; + +#define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */ +#define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */ +#define DMA_BUS_MODE_RPBL_SHIFT 17 +#define DMA_BUS_MODE_USP 0x00800000 +#define DMA_BUS_MODE_4PBL 0x01000000 +#define DMA_BUS_MODE_AAL 0x02000000 + +/* DMA CRS Control and Status Register Mapping */ +#define DMA_HOST_TX_DESC 0x00001048 /* Current Host Tx descriptor */ +#define DMA_HOST_RX_DESC 0x0000104c /* Current Host Rx descriptor */ +/* DMA Bus Mode register defines */ +#define DMA_BUS_PR_RATIO_MASK 0x0000c000 /* Rx/Tx priority ratio */ +#define DMA_BUS_PR_RATIO_SHIFT 14 +#define DMA_BUS_FB 0x00010000 /* Fixed Burst */ + +/* DMA operation mode defines (start/stop tx/rx are placed in common header)*/ +#define DMA_CONTROL_DT 0x04000000 /* Disable Drop TCP/IP csum error */ +#define DMA_CONTROL_RSF 0x02000000 /* Receive Store and Forward */ +#define DMA_CONTROL_DFF 0x01000000 /* Disable flushing */ +/* Threshold for Activating the FC */ +enum rfa { + act_full_minus_1 = 0x00800000, + act_full_minus_2 = 0x00800200, + act_full_minus_3 = 0x00800400, + act_full_minus_4 = 0x00800600, +}; +/* Threshold for Deactivating the FC */ +enum rfd { + deac_full_minus_1 = 0x00400000, + deac_full_minus_2 = 0x00400800, + deac_full_minus_3 = 0x00401000, + deac_full_minus_4 = 0x00401800, +}; +#define DMA_CONTROL_TSF 0x00200000 /* Transmit Store and Forward */ +#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */ + +enum ttc_control { + DMA_CONTROL_TTC_64 = 0x00000000, + DMA_CONTROL_TTC_128 = 0x00004000, + DMA_CONTROL_TTC_192 = 0x00008000, + DMA_CONTROL_TTC_256 = 0x0000c000, + DMA_CONTROL_TTC_40 = 0x00010000, + DMA_CONTROL_TTC_32 = 0x00014000, + DMA_CONTROL_TTC_24 = 0x00018000, + DMA_CONTROL_TTC_16 = 0x0001c000, +}; +#define DMA_CONTROL_TC_TX_MASK 0xfffe3fff + +#define DMA_CONTROL_EFC 0x00000100 +#define DMA_CONTROL_FEF 0x00000080 +#define DMA_CONTROL_FUF 0x00000040 + +enum rtc_control { + DMA_CONTROL_RTC_64 = 0x00000000, + DMA_CONTROL_RTC_32 = 0x00000008, + DMA_CONTROL_RTC_96 = 0x00000010, + DMA_CONTROL_RTC_128 = 0x00000018, +}; +#define DMA_CONTROL_TC_RX_MASK 0xffffffe7 + +#define DMA_CONTROL_OSF 0x00000004 /* Operate on second frame */ + +/* MMC registers offset */ +#define GMAC_MMC_CTRL 0x100 +#define GMAC_MMC_RX_INTR 0x104 +#define GMAC_MMC_TX_INTR 0x108 +#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208 diff --git a/drivers/net/stmmac/mac100.c b/drivers/net/stmmac/mac100.c new file mode 100644 index 00000000000..625171b6062 --- /dev/null +++ b/drivers/net/stmmac/mac100.c @@ -0,0 +1,517 @@ +/******************************************************************************* + This is the driver for the MAC 10/100 on-chip Ethernet controller + currently tested on all the ST boards based on STb7109 and stx7200 SoCs. + + DWC Ether MAC 10/100 Universal version 4.0 has been used for developing + this code.
+ + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> +*******************************************************************************/ + +#include <linux/netdevice.h> +#include <linux/crc32.h> +#include <linux/mii.h> +#include <linux/phy.h> + +#include "common.h" +#include "mac100.h" + +#undef MAC100_DEBUG +/*#define MAC100_DEBUG*/ +#ifdef MAC100_DEBUG +#define DBG(fmt, args...) printk(fmt, ## args) +#else +#define DBG(fmt, args...) do { } while (0) +#endif + +static void mac100_core_init(unsigned long ioaddr) +{ + u32 value = readl(ioaddr + MAC_CONTROL); + + writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL); + +#ifdef STMMAC_VLAN_TAG_USED + writel(ETH_P_8021Q, ioaddr + MAC_VLAN1); +#endif + return; +} + +static void mac100_dump_mac_regs(unsigned long ioaddr) +{ + pr_info("\t----------------------------------------------\n" + "\t MAC100 CSR (base addr = 0x%8x)\n" + "\t----------------------------------------------\n", + (unsigned int)ioaddr); + pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL, + readl(ioaddr + MAC_CONTROL)); + pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH, + readl(ioaddr + MAC_ADDR_HIGH)); + pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW, + readl(ioaddr + MAC_ADDR_LOW)); + pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n", + MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH)); + pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n", + MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW)); + pr_info("\tflow control (offset 0x%x): 0x%08x\n", + MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL)); + pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1, + readl(ioaddr + MAC_VLAN1)); + pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2, + readl(ioaddr + MAC_VLAN2)); + pr_info("\n\tMAC management counter registers\n"); + pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n", + MMC_CONTROL, readl(ioaddr + MMC_CONTROL)); + pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n", + MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR)); + pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n", + MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR)); + pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n", + MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK)); + pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n", + MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK)); + return; +} + +static int mac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, + u32 dma_rx) +{ + u32 value = readl(ioaddr + DMA_BUS_MODE); + /* DMA SW reset */ + value |= DMA_BUS_MODE_SFT_RESET; + writel(value, ioaddr + DMA_BUS_MODE); + do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)); + + /* Enable Application Access by writing to DMA CSR0 */ + 
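+ /* The platform-supplied burst length (pbl) is shifted into the PBL + * field of CSR0 (bits 13:8, cf. DMA_BUS_MODE_PBL_MASK and + * DMA_BUS_MODE_PBL_SHIFT in mac100.h) on top of DMA_BUS_MODE_DEFAULT. */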
writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT), + ioaddr + DMA_BUS_MODE); + + /* Mask interrupts by writing to CSR7 */ + writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); + + /* The base address of the RX/TX descriptor lists must be written into + * DMA CSR3 and CSR4, respectively. */ + writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR); + writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR); + + return 0; +} + +/* Store and Forward capability is not used at all. + * The transmit threshold can be programmed by + * setting the TTC bits in the DMA control register.*/ +static void mac100_dma_operation_mode(unsigned long ioaddr, int txmode, + int rxmode) +{ + u32 csr6 = readl(ioaddr + DMA_CONTROL); + + if (txmode <= 32) + csr6 |= DMA_CONTROL_TTC_32; + else if (txmode <= 64) + csr6 |= DMA_CONTROL_TTC_64; + else + csr6 |= DMA_CONTROL_TTC_128; + + writel(csr6, ioaddr + DMA_CONTROL); + + return; +} + +static void mac100_dump_dma_regs(unsigned long ioaddr) +{ + int i; + + DBG(KERN_DEBUG "MAC100 DMA CSR \n"); + for (i = 0; i < 9; i++) + pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i, + (DMA_BUS_MODE + i * 4), + readl(ioaddr + DMA_BUS_MODE + i * 4)); + DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n", + DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR)); + DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n", + DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR)); + return; +} + +/* DMA controller has two counters to track the number of + missed receive frames. */ +static void mac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x, + unsigned long ioaddr) +{ + struct net_device_stats *stats = (struct net_device_stats *)data; + u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR); + + if (unlikely(csr8)) { + if (csr8 & DMA_MISSED_FRAME_OVE) { + stats->rx_over_errors += 0x800; + x->rx_overflow_cntr += 0x800; + } else { + unsigned int ove_cntr; + ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17); + stats->rx_over_errors += ove_cntr; + x->rx_overflow_cntr += ove_cntr; + } + + if (csr8 & DMA_MISSED_FRAME_OVE_M) { + stats->rx_missed_errors += 0xffff; + x->rx_missed_cntr += 0xffff; + } else { + unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR); + stats->rx_missed_errors += miss_f; + x->rx_missed_cntr += miss_f; + } + } + return; +} + +static int mac100_get_tx_frame_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p, unsigned long ioaddr) +{ + int ret = 0; + struct net_device_stats *stats = (struct net_device_stats *)data; + + if (unlikely(p->des01.tx.error_summary)) { + if (unlikely(p->des01.tx.underflow_error)) { + x->tx_underflow++; + stats->tx_fifo_errors++; + } + if (unlikely(p->des01.tx.no_carrier)) { + x->tx_carrier++; + stats->tx_carrier_errors++; + } + if (unlikely(p->des01.tx.loss_carrier)) { + x->tx_losscarrier++; + stats->tx_carrier_errors++; + } + if (unlikely((p->des01.tx.excessive_deferral) || + (p->des01.tx.excessive_collisions) || + (p->des01.tx.late_collision))) + stats->collisions += p->des01.tx.collision_count; + ret = -1; + } + if (unlikely(p->des01.tx.heartbeat_fail)) { + x->tx_heartbeat++; + stats->tx_heartbeat_errors++; + ret = -1; + } + if (unlikely(p->des01.tx.deferred)) + x->tx_deferred++; + + return ret; +} + +static int mac100_get_tx_len(struct dma_desc *p) +{ + return p->des01.tx.buffer1_size; +} + +/* This function verifies if each incoming frame has some errors + * and, if required, updates the multicast statistics. + * In case of success, it returns csum_none because the device + * is not able to compute the csum in HW.
*/ +static int mac100_get_rx_frame_status(void *data, struct stmmac_extra_stats *x, + struct dma_desc *p) +{ + int ret = csum_none; + struct net_device_stats *stats = (struct net_device_stats *)data; + + if (unlikely(p->des01.rx.last_descriptor == 0)) { + pr_warning("mac100 Error: Oversized Ethernet " + "frame spanned multiple buffers\n"); + stats->rx_length_errors++; + return discard_frame; + } + + if (unlikely(p->des01.rx.error_summary)) { + if (unlikely(p->des01.rx.descriptor_error)) + x->rx_desc++; + if (unlikely(p->des01.rx.partial_frame_error)) + x->rx_partial++; + if (unlikely(p->des01.rx.run_frame)) + x->rx_runt++; + if (unlikely(p->des01.rx.frame_too_long)) + x->rx_toolong++; + if (unlikely(p->des01.rx.collision)) { + x->rx_collision++; + stats->collisions++; + } + if (unlikely(p->des01.rx.crc_error)) { + x->rx_crc++; + stats->rx_crc_errors++; + } + ret = discard_frame; + } + if (unlikely(p->des01.rx.dribbling)) + ret = discard_frame; + + if (unlikely(p->des01.rx.length_error)) { + x->rx_lenght++; + ret = discard_frame; + } + if (unlikely(p->des01.rx.mii_error)) { + x->rx_mii++; + ret = discard_frame; + } + if (p->des01.rx.multicast_frame) { + x->rx_multicast++; + stats->multicast++; + } + return ret; +} + +static void mac100_irq_status(unsigned long ioaddr) +{ + return; +} + +static void mac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr, + unsigned int reg_n) +{ + stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW); +} + +static void mac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr, + unsigned int reg_n) +{ + stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW); +} + +static void mac100_set_filter(struct net_device *dev) +{ + unsigned long ioaddr = dev->base_addr; + u32 value = readl(ioaddr + MAC_CONTROL); + + if (dev->flags & IFF_PROMISC) { + value |= MAC_CONTROL_PR; + value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO | + MAC_CONTROL_HP); + } else if ((dev->mc_count > HASH_TABLE_SIZE) + || (dev->flags & IFF_ALLMULTI)) { + value |= MAC_CONTROL_PM; + value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO); + writel(0xffffffff, ioaddr + MAC_HASH_HIGH); + writel(0xffffffff, ioaddr + MAC_HASH_LOW); + } else if (dev->mc_count == 0) { /* no multicast */ + value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF | + MAC_CONTROL_HO | MAC_CONTROL_HP); + } else { + int i; + u32 mc_filter[2]; + struct dev_mc_list *mclist; + + /* Perfect filter mode for physical address and Hash + filter for multicast */ + value |= MAC_CONTROL_HP; + value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF + | MAC_CONTROL_HO); + + memset(mc_filter, 0, sizeof(mc_filter)); + for (i = 0, mclist = dev->mc_list; + mclist && i < dev->mc_count; i++, mclist = mclist->next) { + /* The upper 6 bits of the calculated CRC are used to + * index the contents of the hash table */ + int bit_nr = + ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; + /* The most significant bit determines the register to + * use (H/L) while the other 5 bits determine the bit + * within the register.
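+ * For example, an upper-6-bit value of 45 selects mc_filter[45 >> 5], + * i.e. mc_filter[1] (written to MAC_HASH_HIGH below), and sets bit + * 45 & 31 = 13 within it.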
*/ + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + } + writel(mc_filter[0], ioaddr + MAC_HASH_LOW); + writel(mc_filter[1], ioaddr + MAC_HASH_HIGH); + } + + writel(value, ioaddr + MAC_CONTROL); + + DBG(KERN_INFO "%s: CTRL reg: 0x%08x Hash regs: " + "HI 0x%08x, LO 0x%08x\n", + __func__, readl(ioaddr + MAC_CONTROL), + readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW)); + return; +} + +static void mac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex, + unsigned int fc, unsigned int pause_time) +{ + unsigned int flow = MAC_FLOW_CTRL_ENABLE; + + if (duplex) + flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT); + writel(flow, ioaddr + MAC_FLOW_CTRL); + + return; +} + +/* No PMT module supported in our SoC for the Ethernet Controller. */ +static void mac100_pmt(unsigned long ioaddr, unsigned long mode) +{ + return; +} + +static void mac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size, + int disable_rx_ic) +{ + int i; + for (i = 0; i < ring_size; i++) { + p->des01.rx.own = 1; + p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1; + if (i == ring_size - 1) + p->des01.rx.end_ring = 1; + if (disable_rx_ic) + p->des01.rx.disable_ic = 1; + p++; + } + return; +} + +static void mac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size) +{ + int i; + for (i = 0; i < ring_size; i++) { + p->des01.tx.own = 0; + if (i == ring_size - 1) + p->des01.tx.end_ring = 1; + p++; + } + return; +} + +static int mac100_get_tx_owner(struct dma_desc *p) +{ + return p->des01.tx.own; +} + +static int mac100_get_rx_owner(struct dma_desc *p) +{ + return p->des01.rx.own; +} + +static void mac100_set_tx_owner(struct dma_desc *p) +{ + p->des01.tx.own = 1; +} + +static void mac100_set_rx_owner(struct dma_desc *p) +{ + p->des01.rx.own = 1; +} + +static int mac100_get_tx_ls(struct dma_desc *p) +{ + return p->des01.tx.last_segment; +} + +static void mac100_release_tx_desc(struct dma_desc *p) +{ + int ter = p->des01.tx.end_ring; + + /* clean field used within the xmit */ + p->des01.tx.first_segment = 0; + p->des01.tx.last_segment = 0; + p->des01.tx.buffer1_size = 0; + + /* clean status reported */ + p->des01.tx.error_summary = 0; + p->des01.tx.underflow_error = 0; + p->des01.tx.no_carrier = 0; + p->des01.tx.loss_carrier = 0; + p->des01.tx.excessive_deferral = 0; + p->des01.tx.excessive_collisions = 0; + p->des01.tx.late_collision = 0; + p->des01.tx.heartbeat_fail = 0; + p->des01.tx.deferred = 0; + + /* set termination field */ + p->des01.tx.end_ring = ter; + + return; +} + +static void mac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, + int csum_flag) +{ + p->des01.tx.first_segment = is_fs; + p->des01.tx.buffer1_size = len; +} + +static void mac100_clear_tx_ic(struct dma_desc *p) +{ + p->des01.tx.interrupt = 0; +} + +static void mac100_close_tx_desc(struct dma_desc *p) +{ + p->des01.tx.last_segment = 1; + p->des01.tx.interrupt = 1; +} + +static int mac100_get_rx_frame_len(struct dma_desc *p) +{ + return p->des01.rx.frame_length; +} + +struct stmmac_ops mac100_driver = { + .core_init = mac100_core_init, + .dump_mac_regs = mac100_dump_mac_regs, + .dma_init = mac100_dma_init, + .dump_dma_regs = mac100_dump_dma_regs, + .dma_mode = mac100_dma_operation_mode, + .dma_diagnostic_fr = mac100_dma_diagnostic_fr, + .tx_status = mac100_get_tx_frame_status, + .rx_status = mac100_get_rx_frame_status, + .get_tx_len = mac100_get_tx_len, + .set_filter = mac100_set_filter, + .flow_ctrl = mac100_flow_ctrl, + .pmt = mac100_pmt, + .init_rx_desc = mac100_init_rx_desc, + .init_tx_desc = mac100_init_tx_desc, + .get_tx_owner 
= mac100_get_tx_owner, + .get_rx_owner = mac100_get_rx_owner, + .release_tx_desc = mac100_release_tx_desc, + .prepare_tx_desc = mac100_prepare_tx_desc, + .clear_tx_ic = mac100_clear_tx_ic, + .close_tx_desc = mac100_close_tx_desc, + .get_tx_ls = mac100_get_tx_ls, + .set_tx_owner = mac100_set_tx_owner, + .set_rx_owner = mac100_set_rx_owner, + .get_rx_frame_len = mac100_get_rx_frame_len, + .host_irq_status = mac100_irq_status, + .set_umac_addr = mac100_set_umac_addr, + .get_umac_addr = mac100_get_umac_addr, +}; + +struct mac_device_info *mac100_setup(unsigned long ioaddr) +{ + struct mac_device_info *mac; + + mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); + + pr_info("\tMAC 10/100\n"); + + mac->ops = &mac100_driver; + mac->hw.pmt = PMT_NOT_SUPPORTED; + mac->hw.link.port = MAC_CONTROL_PS; + mac->hw.link.duplex = MAC_CONTROL_F; + mac->hw.link.speed = 0; + mac->hw.mii.addr = MAC_MII_ADDR; + mac->hw.mii.data = MAC_MII_DATA; + + return mac; +} diff --git a/drivers/net/stmmac/mac100.h b/drivers/net/stmmac/mac100.h new file mode 100644 index 00000000000..0f8f110d004 --- /dev/null +++ b/drivers/net/stmmac/mac100.h @@ -0,0 +1,116 @@ +/******************************************************************************* + MAC 10/100 Header File + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> +*******************************************************************************/ + +/*---------------------------------------------------------------------------- + * MAC BLOCK defines + *---------------------------------------------------------------------------*/ +/* MAC CSR offset */ +#define MAC_CONTROL 0x00000000 /* MAC Control */ +#define MAC_ADDR_HIGH 0x00000004 /* MAC Address High */ +#define MAC_ADDR_LOW 0x00000008 /* MAC Address Low */ +#define MAC_HASH_HIGH 0x0000000c /* Multicast Hash Table High */ +#define MAC_HASH_LOW 0x00000010 /* Multicast Hash Table Low */ +#define MAC_MII_ADDR 0x00000014 /* MII Address */ +#define MAC_MII_DATA 0x00000018 /* MII Data */ +#define MAC_FLOW_CTRL 0x0000001c /* Flow Control */ +#define MAC_VLAN1 0x00000020 /* VLAN1 Tag */ +#define MAC_VLAN2 0x00000024 /* VLAN2 Tag */ + +/* MAC CTRL defines */ +#define MAC_CONTROL_RA 0x80000000 /* Receive All Mode */ +#define MAC_CONTROL_BLE 0x40000000 /* Endian Mode */ +#define MAC_CONTROL_HBD 0x10000000 /* Heartbeat Disable */ +#define MAC_CONTROL_PS 0x08000000 /* Port Select */ +#define MAC_CONTROL_DRO 0x00800000 /* Disable Receive Own */ +#define MAC_CONTROL_EXT_LOOPBACK 0x00400000 /* Reserved (ext loopback?) 
*/ +#define MAC_CONTROL_OM 0x00200000 /* Loopback Operating Mode */ +#define MAC_CONTROL_F 0x00100000 /* Full Duplex Mode */ +#define MAC_CONTROL_PM 0x00080000 /* Pass All Multicast */ +#define MAC_CONTROL_PR 0x00040000 /* Promiscuous Mode */ +#define MAC_CONTROL_IF 0x00020000 /* Inverse Filtering */ +#define MAC_CONTROL_PB 0x00010000 /* Pass Bad Frames */ +#define MAC_CONTROL_HO 0x00008000 /* Hash Only Filtering Mode */ +#define MAC_CONTROL_HP 0x00002000 /* Hash/Perfect Filtering Mode */ +#define MAC_CONTROL_LCC 0x00001000 /* Late Collision Control */ +#define MAC_CONTROL_DBF 0x00000800 /* Disable Broadcast Frames */ +#define MAC_CONTROL_DRTY 0x00000400 /* Disable Retry */ +#define MAC_CONTROL_ASTP 0x00000100 /* Automatic Pad Stripping */ +#define MAC_CONTROL_BOLMT_10 0x00000000 /* Back Off Limit 10 */ +#define MAC_CONTROL_BOLMT_8 0x00000040 /* Back Off Limit 8 */ +#define MAC_CONTROL_BOLMT_4 0x00000080 /* Back Off Limit 4 */ +#define MAC_CONTROL_BOLMT_1 0x000000c0 /* Back Off Limit 1 */ +#define MAC_CONTROL_DC 0x00000020 /* Deferral Check */ +#define MAC_CONTROL_TE 0x00000008 /* Transmitter Enable */ +#define MAC_CONTROL_RE 0x00000004 /* Receiver Enable */ + +#define MAC_CORE_INIT (MAC_CONTROL_HBD | MAC_CONTROL_ASTP) + +/* MAC FLOW CTRL defines */ +#define MAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */ +#define MAC_FLOW_CTRL_PT_SHIFT 16 +#define MAC_FLOW_CTRL_PASS 0x00000004 /* Pass Control Frames */ +#define MAC_FLOW_CTRL_ENABLE 0x00000002 /* Flow Control Enable */ +#define MAC_FLOW_CTRL_PAUSE 0x00000001 /* Flow Control Busy ... */ + +/* MII ADDR defines */ +#define MAC_MII_ADDR_WRITE 0x00000002 /* MII Write */ +#define MAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */ + +/*---------------------------------------------------------------------------- + * DMA BLOCK defines + *---------------------------------------------------------------------------*/ + +/* DMA Bus Mode register defines */ +#define DMA_BUS_MODE_DBO 0x00100000 /* Descriptor Byte Ordering */ +#define DMA_BUS_MODE_BLE 0x00000080 /* Big Endian/Little Endian */ +#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */ +#define DMA_BUS_MODE_PBL_SHIFT 8 +#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */ +#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */ +#define DMA_BUS_MODE_BAR_BUS 0x00000002 /* Bar-Bus Arbitration */ +#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */ +#define DMA_BUS_MODE_DEFAULT 0x00000000 + +/* DMA Control register defines */ +#define DMA_CONTROL_SF 0x00200000 /* Store And Forward */ + +/* Transmit Threshold Control */ +enum ttc_control { + DMA_CONTROL_TTC_DEFAULT = 0x00000000, /* Threshold is 32 DWORDS */ + DMA_CONTROL_TTC_64 = 0x00004000, /* Threshold is 64 DWORDS */ + DMA_CONTROL_TTC_128 = 0x00008000, /* Threshold is 128 DWORDS */ + DMA_CONTROL_TTC_256 = 0x0000c000, /* Threshold is 256 DWORDS */ + DMA_CONTROL_TTC_18 = 0x00400000, /* Threshold is 18 DWORDS */ + DMA_CONTROL_TTC_24 = 0x00404000, /* Threshold is 24 DWORDS */ + DMA_CONTROL_TTC_32 = 0x00408000, /* Threshold is 32 DWORDS */ + DMA_CONTROL_TTC_40 = 0x0040c000, /* Threshold is 40 DWORDS */ + DMA_CONTROL_SE = 0x00000008, /* Stop On Empty */ + DMA_CONTROL_OSF = 0x00000004, /* Operate On 2nd Frame */ +}; + +/* STMAC110 DMA Missed Frame Counter register defines */ +#define DMA_MISSED_FRAME_OVE 0x10000000 /* FIFO Overflow Overflow */ +#define DMA_MISSED_FRAME_OVE_CNTR 0x0ffe0000 /* Overflow Frame Counter */ +#define DMA_MISSED_FRAME_OVE_M 0x00010000 /* Missed Frame Overflow */ +#define 
DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Couinter */ diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h new file mode 100644 index 00000000000..6d2eae3040e --- /dev/null +++ b/drivers/net/stmmac/stmmac.h @@ -0,0 +1,98 @@ +/******************************************************************************* + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> +*******************************************************************************/ + +#define DRV_MODULE_VERSION "Oct_09" + +#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) +#define STMMAC_VLAN_TAG_USED +#include <linux/if_vlan.h> +#endif + +#include "common.h" +#ifdef CONFIG_STMMAC_TIMER +#include "stmmac_timer.h" +#endif + +struct stmmac_priv { + /* Frequently used values are kept adjacent for cache effect */ + struct dma_desc *dma_tx ____cacheline_aligned; + dma_addr_t dma_tx_phy; + struct sk_buff **tx_skbuff; + unsigned int cur_tx; + unsigned int dirty_tx; + unsigned int dma_tx_size; + int tx_coe; + int tx_coalesce; + + struct dma_desc *dma_rx ; + unsigned int cur_rx; + unsigned int dirty_rx; + struct sk_buff **rx_skbuff; + dma_addr_t *rx_skbuff_dma; + struct sk_buff_head rx_recycle; + + struct net_device *dev; + int is_gmac; + dma_addr_t dma_rx_phy; + unsigned int dma_rx_size; + int rx_csum; + unsigned int dma_buf_sz; + struct device *device; + struct mac_device_info *mac_type; + + struct stmmac_extra_stats xstats; + struct napi_struct napi; + + phy_interface_t phy_interface; + int pbl; + int bus_id; + int phy_addr; + int phy_mask; + int (*phy_reset) (void *priv); + void (*fix_mac_speed) (void *priv, unsigned int speed); + void *bsp_priv; + + int phy_irq; + struct phy_device *phydev; + int oldlink; + int speed; + int oldduplex; + unsigned int flow_ctrl; + unsigned int pause; + struct mii_bus *mii; + + u32 msg_enable; + spinlock_t lock; + int wolopts; + int wolenabled; + int shutdown; +#ifdef CONFIG_STMMAC_TIMER + struct stmmac_timer *tm; +#endif +#ifdef STMMAC_VLAN_TAG_USED + struct vlan_group *vlgrp; +#endif +}; + +extern int stmmac_mdio_unregister(struct net_device *ndev); +extern int stmmac_mdio_register(struct net_device *ndev); +extern void stmmac_set_ethtool_ops(struct net_device *netdev); diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c new file mode 100644 index 00000000000..694ebe6a075 --- /dev/null +++ b/drivers/net/stmmac/stmmac_ethtool.c @@ -0,0 +1,395 @@ +/******************************************************************************* + STMMAC Ethtool support + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public 
License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> +*******************************************************************************/ + +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/mii.h> +#include <linux/phy.h> + +#include "stmmac.h" + +#define REG_SPACE_SIZE 0x1054 +#define MAC100_ETHTOOL_NAME "st_mac100" +#define GMAC_ETHTOOL_NAME "st_gmac" + +struct stmmac_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; +}; + +#define STMMAC_STAT(m) \ + { #m, FIELD_SIZEOF(struct stmmac_extra_stats, m), \ + offsetof(struct stmmac_priv, xstats.m)} + +static const struct stmmac_stats stmmac_gstrings_stats[] = { + STMMAC_STAT(tx_underflow), + STMMAC_STAT(tx_carrier), + STMMAC_STAT(tx_losscarrier), + STMMAC_STAT(tx_heartbeat), + STMMAC_STAT(tx_deferred), + STMMAC_STAT(tx_vlan), + STMMAC_STAT(rx_vlan), + STMMAC_STAT(tx_jabber), + STMMAC_STAT(tx_frame_flushed), + STMMAC_STAT(tx_payload_error), + STMMAC_STAT(tx_ip_header_error), + STMMAC_STAT(rx_desc), + STMMAC_STAT(rx_partial), + STMMAC_STAT(rx_runt), + STMMAC_STAT(rx_toolong), + STMMAC_STAT(rx_collision), + STMMAC_STAT(rx_crc), + STMMAC_STAT(rx_lenght), + STMMAC_STAT(rx_mii), + STMMAC_STAT(rx_multicast), + STMMAC_STAT(rx_gmac_overflow), + STMMAC_STAT(rx_watchdog), + STMMAC_STAT(da_rx_filter_fail), + STMMAC_STAT(sa_rx_filter_fail), + STMMAC_STAT(rx_missed_cntr), + STMMAC_STAT(rx_overflow_cntr), + STMMAC_STAT(tx_undeflow_irq), + STMMAC_STAT(tx_process_stopped_irq), + STMMAC_STAT(tx_jabber_irq), + STMMAC_STAT(rx_overflow_irq), + STMMAC_STAT(rx_buf_unav_irq), + STMMAC_STAT(rx_process_stopped_irq), + STMMAC_STAT(rx_watchdog_irq), + STMMAC_STAT(tx_early_irq), + STMMAC_STAT(fatal_bus_error_irq), + STMMAC_STAT(threshold), + STMMAC_STAT(tx_pkt_n), + STMMAC_STAT(rx_pkt_n), + STMMAC_STAT(poll_n), + STMMAC_STAT(sched_timer_n), + STMMAC_STAT(normal_irq_n), +}; +#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats) + +void stmmac_ethtool_getdrvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + if (!priv->is_gmac) + strcpy(info->driver, MAC100_ETHTOOL_NAME); + else + strcpy(info->driver, GMAC_ETHTOOL_NAME); + + strcpy(info->version, DRV_MODULE_VERSION); + info->fw_version[0] = '\0'; + info->n_stats = STMMAC_STATS_LEN; + return; +} + +int stmmac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct stmmac_priv *priv = netdev_priv(dev); + struct phy_device *phy = priv->phydev; + int rc; + if (phy == NULL) { + pr_err("%s: %s: PHY is not registered\n", + __func__, dev->name); + return -ENODEV; + } + if (!netif_running(dev)) { + pr_err("%s: interface is disabled: we cannot track " + "link speed / duplex setting\n", dev->name); + return -EBUSY; + } + cmd->transceiver = XCVR_INTERNAL; + spin_lock_irq(&priv->lock); + rc = phy_ethtool_gset(phy, cmd); + spin_unlock_irq(&priv->lock); + return rc; +} + +int 
stmmac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct stmmac_priv *priv = netdev_priv(dev); + struct phy_device *phy = priv->phydev; + int rc; + + spin_lock(&priv->lock); + rc = phy_ethtool_sset(phy, cmd); + spin_unlock(&priv->lock); + + return rc; +} + +u32 stmmac_ethtool_getmsglevel(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + return priv->msg_enable; +} + +void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level) +{ + struct stmmac_priv *priv = netdev_priv(dev); + priv->msg_enable = level; + +} + +int stmmac_check_if_running(struct net_device *dev) +{ + if (!netif_running(dev)) + return -EBUSY; + return 0; +} + +int stmmac_ethtool_get_regs_len(struct net_device *dev) +{ + return REG_SPACE_SIZE; +} + +void stmmac_ethtool_gregs(struct net_device *dev, + struct ethtool_regs *regs, void *space) +{ + int i; + u32 *reg_space = (u32 *) space; + + struct stmmac_priv *priv = netdev_priv(dev); + + memset(reg_space, 0x0, REG_SPACE_SIZE); + + if (!priv->is_gmac) { + /* MAC registers */ + for (i = 0; i < 12; i++) + reg_space[i] = readl(dev->base_addr + (i * 4)); + /* DMA registers */ + for (i = 0; i < 9; i++) + reg_space[i + 12] = + readl(dev->base_addr + (DMA_BUS_MODE + (i * 4))); + reg_space[22] = readl(dev->base_addr + DMA_CUR_TX_BUF_ADDR); + reg_space[23] = readl(dev->base_addr + DMA_CUR_RX_BUF_ADDR); + } else { + /* MAC registers */ + for (i = 0; i < 55; i++) + reg_space[i] = readl(dev->base_addr + (i * 4)); + /* DMA registers */ + for (i = 0; i < 22; i++) + reg_space[i + 55] = + readl(dev->base_addr + (DMA_BUS_MODE + (i * 4))); + } + + return; +} + +int stmmac_ethtool_set_tx_csum(struct net_device *netdev, u32 data) +{ + if (data) + netdev->features |= NETIF_F_HW_CSUM; + else + netdev->features &= ~NETIF_F_HW_CSUM; + + return 0; +} + +u32 stmmac_ethtool_get_rx_csum(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + return priv->rx_csum; +} + +static void +stmmac_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct stmmac_priv *priv = netdev_priv(netdev); + + spin_lock(&priv->lock); + + pause->rx_pause = 0; + pause->tx_pause = 0; + pause->autoneg = priv->phydev->autoneg; + + if (priv->flow_ctrl & FLOW_RX) + pause->rx_pause = 1; + if (priv->flow_ctrl & FLOW_TX) + pause->tx_pause = 1; + + spin_unlock(&priv->lock); + return; +} + +static int +stmmac_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct stmmac_priv *priv = netdev_priv(netdev); + struct phy_device *phy = priv->phydev; + int new_pause = FLOW_OFF; + int ret = 0; + + spin_lock(&priv->lock); + + if (pause->rx_pause) + new_pause |= FLOW_RX; + if (pause->tx_pause) + new_pause |= FLOW_TX; + + priv->flow_ctrl = new_pause; + + if (phy->autoneg) { + if (netif_running(netdev)) { + struct ethtool_cmd cmd; + /* auto-negotiation automatically restarted */ + cmd.cmd = ETHTOOL_NWAY_RST; + cmd.supported = phy->supported; + cmd.advertising = phy->advertising; + cmd.autoneg = phy->autoneg; + cmd.speed = phy->speed; + cmd.duplex = phy->duplex; + cmd.phy_address = phy->addr; + ret = phy_ethtool_sset(phy, &cmd); + } + } else { + unsigned long ioaddr = netdev->base_addr; + priv->mac_type->ops->flow_ctrl(ioaddr, phy->duplex, + priv->flow_ctrl, priv->pause); + } + spin_unlock(&priv->lock); + return ret; +} + +static void stmmac_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *dummy, u64 *data) +{ + struct stmmac_priv *priv = netdev_priv(dev); + unsigned long ioaddr = 
dev->base_addr; + int i; + + /* Update HW stats if supported */ + priv->mac_type->ops->dma_diagnostic_fr(&dev->stats, &priv->xstats, + ioaddr); + + for (i = 0; i < STMMAC_STATS_LEN; i++) { + char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset; + data[i] = (stmmac_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p); + } + + return; +} + +static int stmmac_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return STMMAC_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + int i; + u8 *p = data; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < STMMAC_STATS_LEN; i++) { + memcpy(p, stmmac_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + default: + WARN_ON(1); + break; + } + return; +} + +/* Currently only support WOL through Magic packet. */ +static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + spin_lock_irq(&priv->lock); + if (priv->wolenabled == PMT_SUPPORTED) { + wol->supported = WAKE_MAGIC; + wol->wolopts = priv->wolopts; + } + spin_unlock_irq(&priv->lock); +} + +static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct stmmac_priv *priv = netdev_priv(dev); + u32 support = WAKE_MAGIC; + + if (priv->wolenabled == PMT_NOT_SUPPORTED) + return -EINVAL; + + if (wol->wolopts & ~support) + return -EINVAL; + + if (wol->wolopts == 0) + device_set_wakeup_enable(priv->device, 0); + else + device_set_wakeup_enable(priv->device, 1); + + spin_lock_irq(&priv->lock); + priv->wolopts = wol->wolopts; + spin_unlock_irq(&priv->lock); + + return 0; +} + +static struct ethtool_ops stmmac_ethtool_ops = { + .begin = stmmac_check_if_running, + .get_drvinfo = stmmac_ethtool_getdrvinfo, + .get_settings = stmmac_ethtool_getsettings, + .set_settings = stmmac_ethtool_setsettings, + .get_msglevel = stmmac_ethtool_getmsglevel, + .set_msglevel = stmmac_ethtool_setmsglevel, + .get_regs = stmmac_ethtool_gregs, + .get_regs_len = stmmac_ethtool_get_regs_len, + .get_link = ethtool_op_get_link, + .get_rx_csum = stmmac_ethtool_get_rx_csum, + .get_tx_csum = ethtool_op_get_tx_csum, + .set_tx_csum = stmmac_ethtool_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, + .get_pauseparam = stmmac_get_pauseparam, + .set_pauseparam = stmmac_set_pauseparam, + .get_ethtool_stats = stmmac_get_ethtool_stats, + .get_strings = stmmac_get_strings, + .get_wol = stmmac_get_wol, + .set_wol = stmmac_set_wol, + .get_sset_count = stmmac_get_sset_count, +#ifdef NETIF_F_TSO + .get_tso = ethtool_op_get_tso, + .set_tso = ethtool_op_set_tso, +#endif +}; + +void stmmac_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &stmmac_ethtool_ops); +} diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c new file mode 100644 index 00000000000..c2f14dc9ba2 --- /dev/null +++ b/drivers/net/stmmac/stmmac_main.c @@ -0,0 +1,2204 @@ +/******************************************************************************* + This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers. + ST Ethernet IPs are built around a Synopsys IP Core. 
+ + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> + + Documentation available at: + http://www.stlinux.com + Support available at: + https://bugzilla.stlinux.com/ +*******************************************************************************/ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/interrupt.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/platform_device.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/skbuff.h> +#include <linux/ethtool.h> +#include <linux/if_ether.h> +#include <linux/crc32.h> +#include <linux/mii.h> +#include <linux/phy.h> +#include <linux/if_vlan.h> +#include <linux/dma-mapping.h> +#include <linux/stm/soc.h> +#include "stmmac.h" + +#define STMMAC_RESOURCE_NAME "stmmaceth" +#define PHY_RESOURCE_NAME "stmmacphy" + +#undef STMMAC_DEBUG +/*#define STMMAC_DEBUG*/ +#ifdef STMMAC_DEBUG +#define DBG(nlevel, klevel, fmt, args...) \ + ((void)(netif_msg_##nlevel(priv) && \ + printk(KERN_##klevel fmt, ## args))) +#else +#define DBG(nlevel, klevel, fmt, args...) do { } while (0) +#endif + +#undef STMMAC_RX_DEBUG +/*#define STMMAC_RX_DEBUG*/ +#ifdef STMMAC_RX_DEBUG +#define RX_DBG(fmt, args...) printk(fmt, ## args) +#else +#define RX_DBG(fmt, args...) do { } while (0) +#endif + +#undef STMMAC_XMIT_DEBUG +/*#define STMMAC_XMIT_DEBUG*/ +#ifdef STMMAC_XMIT_DEBUG +#define TX_DBG(fmt, args...) printk(fmt, ## args) +#else +#define TX_DBG(fmt, args...)
do { } while (0) +#endif + +#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x) +#define JUMBO_LEN 9000 + +/* Module parameters */ +#define TX_TIMEO 5000 /* default 5 seconds */ +static int watchdog = TX_TIMEO; +module_param(watchdog, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds"); + +static int debug = -1; /* -1: default, 0: no output, 16: all */ +module_param(debug, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(debug, "Message Level (0: no output, 16: all)"); + +static int phyaddr = -1; +module_param(phyaddr, int, S_IRUGO); +MODULE_PARM_DESC(phyaddr, "Physical device address"); + +#define DMA_TX_SIZE 256 +static int dma_txsize = DMA_TX_SIZE; +module_param(dma_txsize, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(dma_txsize, "Number of descriptors in the TX list"); + +#define DMA_RX_SIZE 256 +static int dma_rxsize = DMA_RX_SIZE; +module_param(dma_rxsize, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(dma_rxsize, "Number of descriptors in the RX list"); + +static int flow_ctrl = FLOW_OFF; +module_param(flow_ctrl, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]"); + +static int pause = PAUSE_TIME; +module_param(pause, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(pause, "Flow Control Pause Time"); + +#define TC_DEFAULT 64 +static int tc = TC_DEFAULT; +module_param(tc, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(tc, "DMA threshold control value"); + +#define RX_NO_COALESCE 1 /* Always interrupt on completion */ +#define TX_NO_COALESCE -1 /* No moderation by default */ + +/* Pay attention to tune this parameter; take care of both + * hardware capability and network stability/performance impact. + * Many tests showed that ~4ms latency seems to be good enough. */ +#ifdef CONFIG_STMMAC_TIMER +#define DEFAULT_PERIODIC_RATE 256 +static int tmrate = DEFAULT_PERIODIC_RATE; +module_param(tmrate, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(tmrate, "External timer freq. (default: 256Hz)"); +#endif + +#define DMA_BUFFER_SIZE BUF_SIZE_2KiB +static int buf_sz = DMA_BUFFER_SIZE; +module_param(buf_sz, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(buf_sz, "DMA buffer size"); + +/* In case of Giga ETH, we can enable/disable the COE for the + * transmit HW checksum computation. + * Note that, if tx csum is off in HW, SG will still be supported. */ +static int tx_coe = HW_CSUM; +module_param(tx_coe, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(tx_coe, "GMAC COE type 2 [on/off]"); + +static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_IFUP | + NETIF_MSG_IFDOWN | NETIF_MSG_TIMER); + +static irqreturn_t stmmac_interrupt(int irq, void *dev_id); +static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev); + +/** + * stmmac_verify_args - verify the driver parameters. + * Description: it verifies if some wrong parameter is passed to the driver. + * Note that wrong parameters are replaced with the default values.
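+ * For instance, a negative watchdog, dma_rxsize or dma_txsize is put back + * to its compile-time default, and out-of-range buf_sz or pause values fall + * back to DMA_BUFFER_SIZE and PAUSE_TIME (see the checks below).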
+ */ +static void stmmac_verify_args(void) +{ + if (unlikely(watchdog < 0)) + watchdog = TX_TIMEO; + if (unlikely(dma_rxsize < 0)) + dma_rxsize = DMA_RX_SIZE; + if (unlikely(dma_txsize < 0)) + dma_txsize = DMA_TX_SIZE; + if (unlikely((buf_sz < DMA_BUFFER_SIZE) || (buf_sz > BUF_SIZE_16KiB))) + buf_sz = DMA_BUFFER_SIZE; + if (unlikely(flow_ctrl > 1)) + flow_ctrl = FLOW_AUTO; + else if (likely(flow_ctrl < 0)) + flow_ctrl = FLOW_OFF; + if (unlikely((pause < 0) || (pause > 0xffff))) + pause = PAUSE_TIME; + + return; +} + +#if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG) +static void print_pkt(unsigned char *buf, int len) +{ + int j; + pr_info("len = %d byte, buf addr: 0x%p", len, buf); + for (j = 0; j < len; j++) { + if ((j % 16) == 0) + pr_info("\n %03x:", j); + pr_info(" %02x", buf[j]); + } + pr_info("\n"); + return; +} +#endif + +/* minimum number of free TX descriptors required to wake up TX process */ +#define STMMAC_TX_THRESH(x) (x->dma_tx_size/4) + +static inline u32 stmmac_tx_avail(struct stmmac_priv *priv) +{ + return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1; +} + +/** + * stmmac_adjust_link + * @dev: net device structure + * Description: it adjusts the link parameters. + */ +static void stmmac_adjust_link(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + struct phy_device *phydev = priv->phydev; + unsigned long ioaddr = dev->base_addr; + unsigned long flags; + int new_state = 0; + unsigned int fc = priv->flow_ctrl, pause_time = priv->pause; + + if (phydev == NULL) + return; + + DBG(probe, DEBUG, "stmmac_adjust_link: called. address %d link %d\n", + phydev->addr, phydev->link); + + spin_lock_irqsave(&priv->lock, flags); + if (phydev->link) { + u32 ctrl = readl(ioaddr + MAC_CTRL_REG); + + /* Now we make sure that we can be in full duplex mode. + * If not, we operate in half-duplex mode. */ + if (phydev->duplex != priv->oldduplex) { + new_state = 1; + if (!(phydev->duplex)) + ctrl &= ~priv->mac_type->hw.link.duplex; + else + ctrl |= priv->mac_type->hw.link.duplex; + priv->oldduplex = phydev->duplex; + } + /* Flow Control operation */ + if (phydev->pause) + priv->mac_type->ops->flow_ctrl(ioaddr, phydev->duplex, + fc, pause_time); + + if (phydev->speed != priv->speed) { + new_state = 1; + switch (phydev->speed) { + case 1000: + if (likely(priv->is_gmac)) + ctrl &= ~priv->mac_type->hw.link.port; + break; + case 100: + case 10: + if (priv->is_gmac) { + ctrl |= priv->mac_type->hw.link.port; + if (phydev->speed == SPEED_100) { + ctrl |= + priv->mac_type->hw.link. + speed; + } else { + ctrl &= + ~(priv->mac_type->hw. + link.speed); + } + } else { + ctrl &= ~priv->mac_type->hw.link.port; + } + priv->fix_mac_speed(priv->bsp_priv, + phydev->speed); + break; + default: + if (netif_msg_link(priv)) + pr_warning("%s: Speed (%d) is not 10" + " or 100!\n", dev->name, phydev->speed); + break; + } + + priv->speed = phydev->speed; + } + + writel(ctrl, ioaddr + MAC_CTRL_REG); + + if (!priv->oldlink) { + new_state = 1; + priv->oldlink = 1; + } + } else if (priv->oldlink) { + new_state = 1; + priv->oldlink = 0; + priv->speed = 0; + priv->oldduplex = -1; + } + + if (new_state && netif_msg_link(priv)) + phy_print_status(phydev); + + spin_unlock_irqrestore(&priv->lock, flags); + + DBG(probe, DEBUG, "stmmac_adjust_link: exiting\n"); +} + +/** + * stmmac_init_phy - PHY initialization + * @dev: net device structure + * Description: it initializes the driver's PHY state, and attaches the PHY + * to the mac driver. 
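+ * The PHY is looked up on the MDIO bus derived from bus_id and phy_addr; + * a phy_id of 0 is treated as a missing device (broken pull-up on MDIO).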
+ * Return value: + * 0 on success + */ +static int stmmac_init_phy(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + struct phy_device *phydev; + char phy_id[BUS_ID_SIZE]; /* PHY to connect */ + char bus_id[BUS_ID_SIZE]; + + priv->oldlink = 0; + priv->speed = 0; + priv->oldduplex = -1; + + if (priv->phy_addr == -1) { + /* We don't have a PHY, so do nothing */ + return 0; + } + + snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->bus_id); + snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, bus_id, priv->phy_addr); + pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id); + + phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0, + priv->phy_interface); + + if (IS_ERR(phydev)) { + pr_err("%s: Could not attach to PHY\n", dev->name); + return PTR_ERR(phydev); + } + + /* + * Broken HW is sometimes missing the pull-up resistor on the + * MDIO line, which results in reads to non-existent devices returning + * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent + * device as well. + * Note: phydev->phy_id is the result of reading the UID PHY registers. + */ + if (phydev->phy_id == 0) { + phy_disconnect(phydev); + return -ENODEV; + } + pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)" + " Link = %d\n", dev->name, phydev->phy_id, phydev->link); + + priv->phydev = phydev; + + return 0; +} + +static inline void stmmac_mac_enable_rx(unsigned long ioaddr) +{ + u32 value = readl(ioaddr + MAC_CTRL_REG); + value |= MAC_RNABLE_RX; + /* Set the RE (receive enable bit into the MAC CTRL register). */ + writel(value, ioaddr + MAC_CTRL_REG); +} + +static inline void stmmac_mac_enable_tx(unsigned long ioaddr) +{ + u32 value = readl(ioaddr + MAC_CTRL_REG); + value |= MAC_ENABLE_TX; + /* Set the TE (transmit enable bit into the MAC CTRL register). */ + writel(value, ioaddr + MAC_CTRL_REG); +} + +static inline void stmmac_mac_disable_rx(unsigned long ioaddr) +{ + u32 value = readl(ioaddr + MAC_CTRL_REG); + value &= ~MAC_RNABLE_RX; + writel(value, ioaddr + MAC_CTRL_REG); +} + +static inline void stmmac_mac_disable_tx(unsigned long ioaddr) +{ + u32 value = readl(ioaddr + MAC_CTRL_REG); + value &= ~MAC_ENABLE_TX; + writel(value, ioaddr + MAC_CTRL_REG); +} + +/** + * display_ring + * @p: pointer to the ring. + * @size: size of the ring. + * Description: display all the descriptors within the ring. + */ +static void display_ring(struct dma_desc *p, int size) +{ + struct tmp_s { + u64 a; + unsigned int b; + unsigned int c; + }; + int i; + for (i = 0; i < size; i++) { + struct tmp_s *x = (struct tmp_s *)(p + i); + pr_info("\t%d [0x%x]: DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x", + i, (unsigned int)virt_to_phys(&p[i]), + (unsigned int)(x->a), (unsigned int)((x->a) >> 32), + x->b, x->c); + pr_info("\n"); + } +} + +/** + * init_dma_desc_rings - init the RX/TX descriptor rings + * @dev: net device structure + * Description: this function initializes the DMA RX/TX descriptors + * and allocates the socket buffers. + */ +static void init_dma_desc_rings(struct net_device *dev) +{ + int i; + struct stmmac_priv *priv = netdev_priv(dev); + struct sk_buff *skb; + unsigned int txsize = priv->dma_tx_size; + unsigned int rxsize = priv->dma_rx_size; + unsigned int bfsize = priv->dma_buf_sz; + int buff2_needed = 0; + int dis_ic = 0; + +#ifdef CONFIG_STMMAC_TIMER + /* Using Timers disable interrupts on completion for the reception */ + dis_ic = 1; +#endif + /* Set the Buffer size according to the MTU; + * indeed, in case of jumbo we need to bump-up the buffer sizes. 
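+ * For example: an MTU below 2KiB keeps the default DMA_BUFFER_SIZE, while + * MTUs of at least 2KiB, 4KiB or 8KiB select 4KiB, 8KiB or 16KiB buffers + * respectively (see the checks below).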
+ */ + if (unlikely(dev->mtu >= BUF_SIZE_8KiB)) + bfsize = BUF_SIZE_16KiB; + else if (unlikely(dev->mtu >= BUF_SIZE_4KiB)) + bfsize = BUF_SIZE_8KiB; + else if (unlikely(dev->mtu >= BUF_SIZE_2KiB)) + bfsize = BUF_SIZE_4KiB; + else if (unlikely(dev->mtu >= DMA_BUFFER_SIZE)) + bfsize = BUF_SIZE_2KiB; + else + bfsize = DMA_BUFFER_SIZE; + + /* If the MTU exceeds 8k so use the second buffer in the chain */ + if (bfsize >= BUF_SIZE_8KiB) + buff2_needed = 1; + + DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n", + txsize, rxsize, bfsize); + + priv->rx_skbuff_dma = kmalloc(rxsize * sizeof(dma_addr_t), GFP_KERNEL); + priv->rx_skbuff = + kmalloc(sizeof(struct sk_buff *) * rxsize, GFP_KERNEL); + priv->dma_rx = + (struct dma_desc *)dma_alloc_coherent(priv->device, + rxsize * + sizeof(struct dma_desc), + &priv->dma_rx_phy, + GFP_KERNEL); + priv->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * txsize, + GFP_KERNEL); + priv->dma_tx = + (struct dma_desc *)dma_alloc_coherent(priv->device, + txsize * + sizeof(struct dma_desc), + &priv->dma_tx_phy, + GFP_KERNEL); + + if ((priv->dma_rx == NULL) || (priv->dma_tx == NULL)) { + pr_err("%s:ERROR allocating the DMA Tx/Rx desc\n", __func__); + return; + } + + DBG(probe, INFO, "stmmac (%s) DMA desc rings: virt addr (Rx %p, " + "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n", + dev->name, priv->dma_rx, priv->dma_tx, + (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy); + + /* RX INITIALIZATION */ + DBG(probe, INFO, "stmmac: SKB addresses:\n" + "skb\t\tskb data\tdma data\n"); + + for (i = 0; i < rxsize; i++) { + struct dma_desc *p = priv->dma_rx + i; + + skb = netdev_alloc_skb_ip_align(dev, bfsize); + if (unlikely(skb == NULL)) { + pr_err("%s: Rx init fails; skb is NULL\n", __func__); + break; + } + priv->rx_skbuff[i] = skb; + priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, + bfsize, DMA_FROM_DEVICE); + + p->des2 = priv->rx_skbuff_dma[i]; + if (unlikely(buff2_needed)) + p->des3 = p->des2 + BUF_SIZE_8KiB; + DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i], + priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]); + } + priv->cur_rx = 0; + priv->dirty_rx = (unsigned int)(i - rxsize); + priv->dma_buf_sz = bfsize; + buf_sz = bfsize; + + /* TX INITIALIZATION */ + for (i = 0; i < txsize; i++) { + priv->tx_skbuff[i] = NULL; + priv->dma_tx[i].des2 = 0; + } + priv->dirty_tx = 0; + priv->cur_tx = 0; + + /* Clear the Rx/Tx descriptors */ + priv->mac_type->ops->init_rx_desc(priv->dma_rx, rxsize, dis_ic); + priv->mac_type->ops->init_tx_desc(priv->dma_tx, txsize); + + if (netif_msg_hw(priv)) { + pr_info("RX descriptor ring:\n"); + display_ring(priv->dma_rx, rxsize); + pr_info("TX descriptor ring:\n"); + display_ring(priv->dma_tx, txsize); + } + return; +} + +static void dma_free_rx_skbufs(struct stmmac_priv *priv) +{ + int i; + + for (i = 0; i < priv->dma_rx_size; i++) { + if (priv->rx_skbuff[i]) { + dma_unmap_single(priv->device, priv->rx_skbuff_dma[i], + priv->dma_buf_sz, DMA_FROM_DEVICE); + dev_kfree_skb_any(priv->rx_skbuff[i]); + } + priv->rx_skbuff[i] = NULL; + } + return; +} + +static void dma_free_tx_skbufs(struct stmmac_priv *priv) +{ + int i; + + for (i = 0; i < priv->dma_tx_size; i++) { + if (priv->tx_skbuff[i] != NULL) { + struct dma_desc *p = priv->dma_tx + i; + if (p->des2) + dma_unmap_single(priv->device, p->des2, + priv->mac_type->ops->get_tx_len(p), + DMA_TO_DEVICE); + dev_kfree_skb_any(priv->tx_skbuff[i]); + priv->tx_skbuff[i] = NULL; + } + } + return; +} + +static void free_dma_desc_resources(struct stmmac_priv *priv) 
+{ + /* Release the DMA TX/RX socket buffers */ + dma_free_rx_skbufs(priv); + dma_free_tx_skbufs(priv); + + /* Free the region of consistent memory previously allocated for + * the DMA */ + dma_free_coherent(priv->device, + priv->dma_tx_size * sizeof(struct dma_desc), + priv->dma_tx, priv->dma_tx_phy); + dma_free_coherent(priv->device, + priv->dma_rx_size * sizeof(struct dma_desc), + priv->dma_rx, priv->dma_rx_phy); + kfree(priv->rx_skbuff_dma); + kfree(priv->rx_skbuff); + kfree(priv->tx_skbuff); + + return; +} + +/** + * stmmac_dma_start_tx + * @ioaddr: device I/O address + * Description: this function starts the DMA tx process. + */ +static void stmmac_dma_start_tx(unsigned long ioaddr) +{ + u32 value = readl(ioaddr + DMA_CONTROL); + value |= DMA_CONTROL_ST; + writel(value, ioaddr + DMA_CONTROL); + return; +} + +static void stmmac_dma_stop_tx(unsigned long ioaddr) +{ + u32 value = readl(ioaddr + DMA_CONTROL); + value &= ~DMA_CONTROL_ST; + writel(value, ioaddr + DMA_CONTROL); + return; +} + +/** + * stmmac_dma_start_rx + * @ioaddr: device I/O address + * Description: this function starts the DMA rx process. + */ +static void stmmac_dma_start_rx(unsigned long ioaddr) +{ + u32 value = readl(ioaddr + DMA_CONTROL); + value |= DMA_CONTROL_SR; + writel(value, ioaddr + DMA_CONTROL); + + return; +} + +static void stmmac_dma_stop_rx(unsigned long ioaddr) +{ + u32 value = readl(ioaddr + DMA_CONTROL); + value &= ~DMA_CONTROL_SR; + writel(value, ioaddr + DMA_CONTROL); + + return; +} + +/** + * stmmac_dma_operation_mode - HW DMA operation mode + * @priv : pointer to the private device structure. + * Description: it sets the DMA operation mode: tx/rx DMA thresholds + * or Store-And-Forward capability. It also verifies the COE for the + * transmission in case of Giga ETH. + */ +static void stmmac_dma_operation_mode(struct stmmac_priv *priv) +{ + if (!priv->is_gmac) { + /* MAC 10/100 */ + priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc, 0); + priv->tx_coe = NO_HW_CSUM; + } else { + if ((priv->dev->mtu <= ETH_DATA_LEN) && (tx_coe)) { + priv->mac_type->ops->dma_mode(priv->dev->base_addr, + SF_DMA_MODE, SF_DMA_MODE); + tc = SF_DMA_MODE; + priv->tx_coe = HW_CSUM; + } else { + /* Checksum computation is performed in software. 
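stmmac_dma_operation_mode() above chooses between threshold-based DMA with software checksumming and store-and-forward with hardware TX checksum offload (GMAC only, standard MTU, tx_coe enabled). A compact sketch of that decision, with the module parameters folded into plain arguments; the names are illustrative, not the driver's API.

    #include <stdbool.h>
    #include <stdio.h>

    #define ETH_DATA_LEN 1500

    enum csum_mode { SW_CSUM, HW_CSUM_SF };

    /* Sketch of the choice made in stmmac_dma_operation_mode():
     * - mac100 core: always threshold mode and software checksums;
     * - GMAC with mtu <= 1500 and tx_coe set: store-and-forward + HW checksum;
     * - otherwise: keep the threshold (tc) and checksum in software
     *   (stmmac_xmit() then falls back to skb_checksum_help()). */
    static enum csum_mode pick_tx_csum(bool is_gmac, int mtu, bool tx_coe)
    {
            if (is_gmac && mtu <= ETH_DATA_LEN && tx_coe)
                    return HW_CSUM_SF;
            return SW_CSUM;
    }

    int main(void)
    {
            printf("gmac, mtu 1500, coe -> %s\n",
                   pick_tx_csum(true, 1500, true) == HW_CSUM_SF ?
                   "HW csum + store-and-forward" : "SW csum + threshold");
            printf("gmac, mtu 9000, coe -> %s\n",
                   pick_tx_csum(true, 9000, true) == HW_CSUM_SF ?
                   "HW csum + store-and-forward" : "SW csum + threshold");
            return 0;
    }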
*/ + priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc, + SF_DMA_MODE); + priv->tx_coe = NO_HW_CSUM; + } + } + tx_coe = priv->tx_coe; + + return; +} + +#ifdef STMMAC_DEBUG +/** + * show_tx_process_state + * @status: tx descriptor status field + * Description: it shows the Transmit Process State for CSR5[22:20] + */ +static void show_tx_process_state(unsigned int status) +{ + unsigned int state; + state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT; + + switch (state) { + case 0: + pr_info("- TX (Stopped): Reset or Stop command\n"); + break; + case 1: + pr_info("- TX (Running):Fetching the Tx desc\n"); + break; + case 2: + pr_info("- TX (Running): Waiting for end of tx\n"); + break; + case 3: + pr_info("- TX (Running): Reading the data " + "and queuing the data into the Tx buf\n"); + break; + case 6: + pr_info("- TX (Suspended): Tx Buff Underflow " + "or an unavailable Transmit descriptor\n"); + break; + case 7: + pr_info("- TX (Running): Closing Tx descriptor\n"); + break; + default: + break; + } + return; +} + +/** + * show_rx_process_state + * @status: rx descriptor status field + * Description: it shows the Receive Process State for CSR5[19:17] + */ +static void show_rx_process_state(unsigned int status) +{ + unsigned int state; + state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT; + + switch (state) { + case 0: + pr_info("- RX (Stopped): Reset or Stop command\n"); + break; + case 1: + pr_info("- RX (Running): Fetching the Rx desc\n"); + break; + case 2: + pr_info("- RX (Running):Checking for end of pkt\n"); + break; + case 3: + pr_info("- RX (Running): Waiting for Rx pkt\n"); + break; + case 4: + pr_info("- RX (Suspended): Unavailable Rx buf\n"); + break; + case 5: + pr_info("- RX (Running): Closing Rx descriptor\n"); + break; + case 6: + pr_info("- RX(Running): Flushing the current frame" + " from the Rx buf\n"); + break; + case 7: + pr_info("- RX (Running): Queuing the Rx frame" + " from the Rx buf into memory\n"); + break; + default: + break; + } + return; +} +#endif + +/** + * stmmac_tx: + * @priv: private driver structure + * Description: it reclaims resources after transmission completes. + */ +static void stmmac_tx(struct stmmac_priv *priv) +{ + unsigned int txsize = priv->dma_tx_size; + unsigned long ioaddr = priv->dev->base_addr; + + while (priv->dirty_tx != priv->cur_tx) { + int last; + unsigned int entry = priv->dirty_tx % txsize; + struct sk_buff *skb = priv->tx_skbuff[entry]; + struct dma_desc *p = priv->dma_tx + entry; + + /* Check if the descriptor is owned by the DMA. */ + if (priv->mac_type->ops->get_tx_owner(p)) + break; + + /* Verify tx error by looking at the last segment */ + last = priv->mac_type->ops->get_tx_ls(p); + if (likely(last)) { + int tx_error = + priv->mac_type->ops->tx_status(&priv->dev->stats, + &priv->xstats, + p, ioaddr); + if (likely(tx_error == 0)) { + priv->dev->stats.tx_packets++; + priv->xstats.tx_pkt_n++; + } else + priv->dev->stats.tx_errors++; + } + TX_DBG("%s: curr %d, dirty %d\n", __func__, + priv->cur_tx, priv->dirty_tx); + + if (likely(p->des2)) + dma_unmap_single(priv->device, p->des2, + priv->mac_type->ops->get_tx_len(p), + DMA_TO_DEVICE); + if (unlikely(p->des3)) + p->des3 = 0; + + if (likely(skb != NULL)) { + /* + * If there's room in the queue (limit it to size) + * we add this skb back into the pool, + * if it's the right size. 
+ */ + if ((skb_queue_len(&priv->rx_recycle) < + priv->dma_rx_size) && + skb_recycle_check(skb, priv->dma_buf_sz)) + __skb_queue_head(&priv->rx_recycle, skb); + else + dev_kfree_skb(skb); + + priv->tx_skbuff[entry] = NULL; + } + + priv->mac_type->ops->release_tx_desc(p); + + entry = (++priv->dirty_tx) % txsize; + } + if (unlikely(netif_queue_stopped(priv->dev) && + stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) { + netif_tx_lock(priv->dev); + if (netif_queue_stopped(priv->dev) && + stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) { + TX_DBG("%s: restart transmit\n", __func__); + netif_wake_queue(priv->dev); + } + netif_tx_unlock(priv->dev); + } + return; +} + +static inline void stmmac_enable_irq(struct stmmac_priv *priv) +{ +#ifndef CONFIG_STMMAC_TIMER + writel(DMA_INTR_DEFAULT_MASK, priv->dev->base_addr + DMA_INTR_ENA); +#else + priv->tm->timer_start(tmrate); +#endif +} + +static inline void stmmac_disable_irq(struct stmmac_priv *priv) +{ +#ifndef CONFIG_STMMAC_TIMER + writel(0, priv->dev->base_addr + DMA_INTR_ENA); +#else + priv->tm->timer_stop(); +#endif +} + +static int stmmac_has_work(struct stmmac_priv *priv) +{ + unsigned int has_work = 0; + int rxret, tx_work = 0; + + rxret = priv->mac_type->ops->get_rx_owner(priv->dma_rx + + (priv->cur_rx % priv->dma_rx_size)); + + if (priv->dirty_tx != priv->cur_tx) + tx_work = 1; + + if (likely(!rxret || tx_work)) + has_work = 1; + + return has_work; +} + +static inline void _stmmac_schedule(struct stmmac_priv *priv) +{ + if (likely(stmmac_has_work(priv))) { + stmmac_disable_irq(priv); + napi_schedule(&priv->napi); + } +} + +#ifdef CONFIG_STMMAC_TIMER +void stmmac_schedule(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + priv->xstats.sched_timer_n++; + + _stmmac_schedule(priv); + + return; +} + +static void stmmac_no_timer_started(unsigned int x) +{; +}; + +static void stmmac_no_timer_stopped(void) +{; +}; +#endif + +/** + * stmmac_tx_err: + * @priv: pointer to the private device structure + * Description: it cleans the descriptors and restarts the transmission + * in case of errors. + */ +static void stmmac_tx_err(struct stmmac_priv *priv) +{ + netif_stop_queue(priv->dev); + + stmmac_dma_stop_tx(priv->dev->base_addr); + dma_free_tx_skbufs(priv); + priv->mac_type->ops->init_tx_desc(priv->dma_tx, priv->dma_tx_size); + priv->dirty_tx = 0; + priv->cur_tx = 0; + stmmac_dma_start_tx(priv->dev->base_addr); + + priv->dev->stats.tx_errors++; + netif_wake_queue(priv->dev); + + return; +} + +/** + * stmmac_dma_interrupt - Interrupt handler for the driver + * @dev: net device structure + * Description: Interrupt handler for the driver (DMA). 
+ */ +static void stmmac_dma_interrupt(struct net_device *dev) +{ + unsigned long ioaddr = dev->base_addr; + struct stmmac_priv *priv = netdev_priv(dev); + /* read the status register (CSR5) */ + u32 intr_status = readl(ioaddr + DMA_STATUS); + + DBG(intr, INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status); + +#ifdef STMMAC_DEBUG + /* It displays the DMA transmit process state (CSR5 register) */ + if (netif_msg_tx_done(priv)) + show_tx_process_state(intr_status); + if (netif_msg_rx_status(priv)) + show_rx_process_state(intr_status); +#endif + /* ABNORMAL interrupts */ + if (unlikely(intr_status & DMA_STATUS_AIS)) { + DBG(intr, INFO, "CSR5[15] DMA ABNORMAL IRQ: "); + if (unlikely(intr_status & DMA_STATUS_UNF)) { + DBG(intr, INFO, "transmit underflow\n"); + if (unlikely(tc != SF_DMA_MODE) + && (tc <= 256)) { + /* Try to bump up the threshold */ + tc += 64; + priv->mac_type->ops->dma_mode(ioaddr, tc, + SF_DMA_MODE); + priv->xstats.threshold = tc; + } + stmmac_tx_err(priv); + priv->xstats.tx_undeflow_irq++; + } + if (unlikely(intr_status & DMA_STATUS_TJT)) { + DBG(intr, INFO, "transmit jabber\n"); + priv->xstats.tx_jabber_irq++; + } + if (unlikely(intr_status & DMA_STATUS_OVF)) { + DBG(intr, INFO, "recv overflow\n"); + priv->xstats.rx_overflow_irq++; + } + if (unlikely(intr_status & DMA_STATUS_RU)) { + DBG(intr, INFO, "receive buffer unavailable\n"); + priv->xstats.rx_buf_unav_irq++; + } + if (unlikely(intr_status & DMA_STATUS_RPS)) { + DBG(intr, INFO, "receive process stopped\n"); + priv->xstats.rx_process_stopped_irq++; + } + if (unlikely(intr_status & DMA_STATUS_RWT)) { + DBG(intr, INFO, "receive watchdog\n"); + priv->xstats.rx_watchdog_irq++; + } + if (unlikely(intr_status & DMA_STATUS_ETI)) { + DBG(intr, INFO, "transmit early interrupt\n"); + priv->xstats.tx_early_irq++; + } + if (unlikely(intr_status & DMA_STATUS_TPS)) { + DBG(intr, INFO, "transmit process stopped\n"); + priv->xstats.tx_process_stopped_irq++; + stmmac_tx_err(priv); + } + if (unlikely(intr_status & DMA_STATUS_FBI)) { + DBG(intr, INFO, "fatal bus error\n"); + priv->xstats.fatal_bus_error_irq++; + stmmac_tx_err(priv); + } + } + + /* TX/RX NORMAL interrupts */ + if (intr_status & DMA_STATUS_NIS) { + priv->xstats.normal_irq_n++; + if (likely((intr_status & DMA_STATUS_RI) || + (intr_status & (DMA_STATUS_TI)))) + _stmmac_schedule(priv); + } + + /* Optional hardware blocks, interrupts should be disabled */ + if (unlikely(intr_status & + (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI))) + pr_info("%s: unexpected status %08x\n", __func__, intr_status); + + /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */ + writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS); + + DBG(intr, INFO, "\n\n"); + + return; +} + +/** + * stmmac_open - open entry point of the driver + * @dev : pointer to the device structure. + * Description: + * This function is the open entry point of the driver. + * Return value: + * 0 on success and an appropriate (-)ve integer as defined in errno.h + * file on failure. + */ +static int stmmac_open(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + unsigned long ioaddr = dev->base_addr; + int ret; + + /* Check that the MAC address is valid. If its not, refuse + * to bring the device up. 
The user must specify an + * address using the following linux command: + * ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */ + if (!is_valid_ether_addr(dev->dev_addr)) { + random_ether_addr(dev->dev_addr); + pr_warning("%s: generated random MAC address %pM\n", dev->name, + dev->dev_addr); + } + + stmmac_verify_args(); + + ret = stmmac_init_phy(dev); + if (unlikely(ret)) { + pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret); + return ret; + } + + /* Request the IRQ lines */ + ret = request_irq(dev->irq, &stmmac_interrupt, + IRQF_SHARED, dev->name, dev); + if (unlikely(ret < 0)) { + pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n", + __func__, dev->irq, ret); + return ret; + } + +#ifdef CONFIG_STMMAC_TIMER + priv->tm = kmalloc(sizeof(struct stmmac_timer *), GFP_KERNEL); + if (unlikely(priv->tm == NULL)) { + pr_err("%s: ERROR: timer memory alloc failed \n", __func__); + return -ENOMEM; + } + priv->tm->freq = tmrate; + + /* Test if the HW timer can be actually used. + * In case of failure continue with no timer. */ + if (unlikely((stmmac_open_ext_timer(dev, priv->tm)) < 0)) { + pr_warning("stmmaceth: cannot attach the HW timer\n"); + tmrate = 0; + priv->tm->freq = 0; + priv->tm->timer_start = stmmac_no_timer_started; + priv->tm->timer_stop = stmmac_no_timer_stopped; + } +#endif + + /* Create and initialize the TX/RX descriptors chains. */ + priv->dma_tx_size = STMMAC_ALIGN(dma_txsize); + priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize); + priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); + init_dma_desc_rings(dev); + + /* DMA initialization and SW reset */ + if (unlikely(priv->mac_type->ops->dma_init(ioaddr, + priv->pbl, priv->dma_tx_phy, priv->dma_rx_phy) < 0)) { + + pr_err("%s: DMA initialization failed\n", __func__); + return -1; + } + + /* Copy the MAC addr into the HW */ + priv->mac_type->ops->set_umac_addr(ioaddr, dev->dev_addr, 0); + /* Initialize the MAC Core */ + priv->mac_type->ops->core_init(ioaddr); + + priv->shutdown = 0; + + /* Initialise the MMC (if present) to disable all interrupts. */ + writel(0xffffffff, ioaddr + MMC_HIGH_INTR_MASK); + writel(0xffffffff, ioaddr + MMC_LOW_INTR_MASK); + + /* Enable the MAC Rx/Tx */ + stmmac_mac_enable_rx(ioaddr); + stmmac_mac_enable_tx(ioaddr); + + /* Set the HW DMA mode and the COE */ + stmmac_dma_operation_mode(priv); + + /* Extra statistics */ + memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); + priv->xstats.threshold = tc; + + /* Start the ball rolling... */ + DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name); + stmmac_dma_start_tx(ioaddr); + stmmac_dma_start_rx(ioaddr); + +#ifdef CONFIG_STMMAC_TIMER + priv->tm->timer_start(tmrate); +#endif + /* Dump DMA/MAC registers */ + if (netif_msg_hw(priv)) { + priv->mac_type->ops->dump_mac_regs(ioaddr); + priv->mac_type->ops->dump_dma_regs(ioaddr); + } + + if (priv->phydev) + phy_start(priv->phydev); + + napi_enable(&priv->napi); + skb_queue_head_init(&priv->rx_recycle); + netif_start_queue(dev); + return 0; +} + +/** + * stmmac_release - close entry point of the driver + * @dev : device pointer. + * Description: + * This is the stop entry point of the driver. 
+ */ +static int stmmac_release(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + /* Stop and disconnect the PHY */ + if (priv->phydev) { + phy_stop(priv->phydev); + phy_disconnect(priv->phydev); + priv->phydev = NULL; + } + + netif_stop_queue(dev); + +#ifdef CONFIG_STMMAC_TIMER + /* Stop and release the timer */ + stmmac_close_ext_timer(); + if (priv->tm != NULL) + kfree(priv->tm); +#endif + napi_disable(&priv->napi); + skb_queue_purge(&priv->rx_recycle); + + /* Free the IRQ lines */ + free_irq(dev->irq, dev); + + /* Stop TX/RX DMA and clear the descriptors */ + stmmac_dma_stop_tx(dev->base_addr); + stmmac_dma_stop_rx(dev->base_addr); + + /* Release and free the Rx/Tx resources */ + free_dma_desc_resources(priv); + + /* Disable the MAC core */ + stmmac_mac_disable_tx(dev->base_addr); + stmmac_mac_disable_rx(dev->base_addr); + + netif_carrier_off(dev); + + return 0; +} + +/* + * To perform emulated hardware segmentation on skb. + */ +static int stmmac_sw_tso(struct stmmac_priv *priv, struct sk_buff *skb) +{ + struct sk_buff *segs, *curr_skb; + int gso_segs = skb_shinfo(skb)->gso_segs; + + /* Estimate the number of fragments in the worst case */ + if (unlikely(stmmac_tx_avail(priv) < gso_segs)) { + netif_stop_queue(priv->dev); + TX_DBG(KERN_ERR "%s: TSO BUG! Tx Ring full when queue awake\n", + __func__); + if (stmmac_tx_avail(priv) < gso_segs) + return NETDEV_TX_BUSY; + + netif_wake_queue(priv->dev); + } + TX_DBG("\tstmmac_sw_tso: segmenting: skb %p (len %d)\n", + skb, skb->len); + + segs = skb_gso_segment(skb, priv->dev->features & ~NETIF_F_TSO); + if (unlikely(IS_ERR(segs))) + goto sw_tso_end; + + do { + curr_skb = segs; + segs = segs->next; + TX_DBG("\t\tcurrent skb->len: %d, *curr %p," + "*next %p\n", curr_skb->len, curr_skb, segs); + curr_skb->next = NULL; + stmmac_xmit(curr_skb, priv->dev); + } while (segs); + +sw_tso_end: + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} + +static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb, + struct net_device *dev, + int csum_insertion) +{ + struct stmmac_priv *priv = netdev_priv(dev); + unsigned int nopaged_len = skb_headlen(skb); + unsigned int txsize = priv->dma_tx_size; + unsigned int entry = priv->cur_tx % txsize; + struct dma_desc *desc = priv->dma_tx + entry; + + if (nopaged_len > BUF_SIZE_8KiB) { + + int buf2_size = nopaged_len - BUF_SIZE_8KiB; + + desc->des2 = dma_map_single(priv->device, skb->data, + BUF_SIZE_8KiB, DMA_TO_DEVICE); + desc->des3 = desc->des2 + BUF_SIZE_4KiB; + priv->mac_type->ops->prepare_tx_desc(desc, 1, BUF_SIZE_8KiB, + csum_insertion); + + entry = (++priv->cur_tx) % txsize; + desc = priv->dma_tx + entry; + + desc->des2 = dma_map_single(priv->device, + skb->data + BUF_SIZE_8KiB, + buf2_size, DMA_TO_DEVICE); + desc->des3 = desc->des2 + BUF_SIZE_4KiB; + priv->mac_type->ops->prepare_tx_desc(desc, 0, + buf2_size, csum_insertion); + priv->mac_type->ops->set_tx_owner(desc); + priv->tx_skbuff[entry] = NULL; + } else { + desc->des2 = dma_map_single(priv->device, skb->data, + nopaged_len, DMA_TO_DEVICE); + desc->des3 = desc->des2 + BUF_SIZE_4KiB; + priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len, + csum_insertion); + } + return entry; +} + +/** + * stmmac_xmit: + * @skb : the socket buffer + * @dev : device pointer + * Description : Tx entry point of the driver. 
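stmmac_handle_jumbo_frames() above splits a linear (non-paged) payload larger than 8 KiB across two descriptors: the first carries BUF_SIZE_8KiB bytes, the second the remainder. A minimal sketch of just that length split, assuming the usual 8192-byte value for BUF_SIZE_8KiB and an invented sample length:

    #include <stdio.h>

    #define BUF_SIZE_8KiB 8192

    int main(void)
    {
            unsigned int nopaged_len = 9014;    /* example jumbo head length */

            if (nopaged_len > BUF_SIZE_8KiB) {
                    unsigned int buf1 = BUF_SIZE_8KiB;
                    unsigned int buf2 = nopaged_len - BUF_SIZE_8KiB;
                    /* Two descriptors are consumed; only the second gets its
                     * OWN bit set here, the first is released at the end of
                     * stmmac_xmit() to avoid racing with the DMA. */
                    printf("desc0: %u bytes, desc1: %u bytes\n", buf1, buf2);
            } else {
                    printf("single descriptor: %u bytes\n", nopaged_len);
            }
            return 0;
    }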
+ */ +static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + unsigned int txsize = priv->dma_tx_size; + unsigned int entry; + int i, csum_insertion = 0; + int nfrags = skb_shinfo(skb)->nr_frags; + struct dma_desc *desc, *first; + + if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) { + if (!netif_queue_stopped(dev)) { + netif_stop_queue(dev); + /* This is a hard error, log it. */ + pr_err("%s: BUG! Tx Ring full when queue awake\n", + __func__); + } + return NETDEV_TX_BUSY; + } + + entry = priv->cur_tx % txsize; + +#ifdef STMMAC_XMIT_DEBUG + if ((skb->len > ETH_FRAME_LEN) || nfrags) + pr_info("stmmac xmit:\n" + "\tskb addr %p - len: %d - nopaged_len: %d\n" + "\tn_frags: %d - ip_summed: %d - %s gso\n", + skb, skb->len, skb_headlen(skb), nfrags, skb->ip_summed, + !skb_is_gso(skb) ? "isn't" : "is"); +#endif + + if (unlikely(skb_is_gso(skb))) + return stmmac_sw_tso(priv, skb); + + if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) { + if (likely(priv->tx_coe == NO_HW_CSUM)) + skb_checksum_help(skb); + else + csum_insertion = 1; + } + + desc = priv->dma_tx + entry; + first = desc; + +#ifdef STMMAC_XMIT_DEBUG + if ((nfrags > 0) || (skb->len > ETH_FRAME_LEN)) + pr_debug("stmmac xmit: skb len: %d, nopaged_len: %d,\n" + "\t\tn_frags: %d, ip_summed: %d\n", + skb->len, skb_headlen(skb), nfrags, skb->ip_summed); +#endif + priv->tx_skbuff[entry] = skb; + if (unlikely(skb->len >= BUF_SIZE_4KiB)) { + entry = stmmac_handle_jumbo_frames(skb, dev, csum_insertion); + desc = priv->dma_tx + entry; + } else { + unsigned int nopaged_len = skb_headlen(skb); + desc->des2 = dma_map_single(priv->device, skb->data, + nopaged_len, DMA_TO_DEVICE); + priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len, + csum_insertion); + } + + for (i = 0; i < nfrags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + int len = frag->size; + + entry = (++priv->cur_tx) % txsize; + desc = priv->dma_tx + entry; + + TX_DBG("\t[entry %d] segment len: %d\n", entry, len); + desc->des2 = dma_map_page(priv->device, frag->page, + frag->page_offset, + len, DMA_TO_DEVICE); + priv->tx_skbuff[entry] = NULL; + priv->mac_type->ops->prepare_tx_desc(desc, 0, len, + csum_insertion); + priv->mac_type->ops->set_tx_owner(desc); + } + + /* Interrupt on completition only for the latest segment */ + priv->mac_type->ops->close_tx_desc(desc); +#ifdef CONFIG_STMMAC_TIMER + /* Clean IC while using timers */ + priv->mac_type->ops->clear_tx_ic(desc); +#endif + /* To avoid raise condition */ + priv->mac_type->ops->set_tx_owner(first); + + priv->cur_tx++; + +#ifdef STMMAC_XMIT_DEBUG + if (netif_msg_pktdata(priv)) { + pr_info("stmmac xmit: current=%d, dirty=%d, entry=%d, " + "first=%p, nfrags=%d\n", + (priv->cur_tx % txsize), (priv->dirty_tx % txsize), + entry, first, nfrags); + display_ring(priv->dma_tx, txsize); + pr_info(">>> frame to be transmitted: "); + print_pkt(skb->data, skb->len); + } +#endif + if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) { + TX_DBG("%s: stop transmitted packets\n", __func__); + netif_stop_queue(dev); + } + + dev->stats.tx_bytes += skb->len; + + /* CSR1 enables the transmit DMA to check for new descriptor */ + writel(1, dev->base_addr + DMA_XMT_POLL_DEMAND); + + return NETDEV_TX_OK; +} + +static inline void stmmac_rx_refill(struct stmmac_priv *priv) +{ + unsigned int rxsize = priv->dma_rx_size; + int bfsize = priv->dma_buf_sz; + struct dma_desc *p = priv->dma_rx; + + for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) { + unsigned int 
entry = priv->dirty_rx % rxsize; + if (likely(priv->rx_skbuff[entry] == NULL)) { + struct sk_buff *skb; + + skb = __skb_dequeue(&priv->rx_recycle); + if (skb == NULL) + skb = netdev_alloc_skb_ip_align(priv->dev, + bfsize); + + if (unlikely(skb == NULL)) + break; + + priv->rx_skbuff[entry] = skb; + priv->rx_skbuff_dma[entry] = + dma_map_single(priv->device, skb->data, bfsize, + DMA_FROM_DEVICE); + + (p + entry)->des2 = priv->rx_skbuff_dma[entry]; + if (unlikely(priv->is_gmac)) { + if (bfsize >= BUF_SIZE_8KiB) + (p + entry)->des3 = + (p + entry)->des2 + BUF_SIZE_8KiB; + } + RX_DBG(KERN_INFO "\trefill entry #%d\n", entry); + } + priv->mac_type->ops->set_rx_owner(p + entry); + } + return; +} + +static int stmmac_rx(struct stmmac_priv *priv, int limit) +{ + unsigned int rxsize = priv->dma_rx_size; + unsigned int entry = priv->cur_rx % rxsize; + unsigned int next_entry; + unsigned int count = 0; + struct dma_desc *p = priv->dma_rx + entry; + struct dma_desc *p_next; + +#ifdef STMMAC_RX_DEBUG + if (netif_msg_hw(priv)) { + pr_debug(">>> stmmac_rx: descriptor ring:\n"); + display_ring(priv->dma_rx, rxsize); + } +#endif + count = 0; + while (!priv->mac_type->ops->get_rx_owner(p)) { + int status; + + if (count >= limit) + break; + + count++; + + next_entry = (++priv->cur_rx) % rxsize; + p_next = priv->dma_rx + next_entry; + prefetch(p_next); + + /* read the status of the incoming frame */ + status = (priv->mac_type->ops->rx_status(&priv->dev->stats, + &priv->xstats, p)); + if (unlikely(status == discard_frame)) + priv->dev->stats.rx_errors++; + else { + struct sk_buff *skb; + /* Length should omit the CRC */ + int frame_len = + priv->mac_type->ops->get_rx_frame_len(p) - 4; + +#ifdef STMMAC_RX_DEBUG + if (frame_len > ETH_FRAME_LEN) + pr_debug("\tRX frame size %d, COE status: %d\n", + frame_len, status); + + if (netif_msg_hw(priv)) + pr_debug("\tdesc: %p [entry %d] buff=0x%x\n", + p, entry, p->des2); +#endif + skb = priv->rx_skbuff[entry]; + if (unlikely(!skb)) { + pr_err("%s: Inconsistent Rx descriptor chain\n", + priv->dev->name); + priv->dev->stats.rx_dropped++; + break; + } + prefetch(skb->data - NET_IP_ALIGN); + priv->rx_skbuff[entry] = NULL; + + skb_put(skb, frame_len); + dma_unmap_single(priv->device, + priv->rx_skbuff_dma[entry], + priv->dma_buf_sz, DMA_FROM_DEVICE); +#ifdef STMMAC_RX_DEBUG + if (netif_msg_pktdata(priv)) { + pr_info(" frame received (%dbytes)", frame_len); + print_pkt(skb->data, frame_len); + } +#endif + skb->protocol = eth_type_trans(skb, priv->dev); + + if (unlikely(status == csum_none)) { + /* always for the old mac 10/100 */ + skb->ip_summed = CHECKSUM_NONE; + netif_receive_skb(skb); + } else { + skb->ip_summed = CHECKSUM_UNNECESSARY; + napi_gro_receive(&priv->napi, skb); + } + + priv->dev->stats.rx_packets++; + priv->dev->stats.rx_bytes += frame_len; + priv->dev->last_rx = jiffies; + } + entry = next_entry; + p = p_next; /* use prefetched values */ + } + + stmmac_rx_refill(priv); + + priv->xstats.rx_pkt_n += count; + + return count; +} + +/** + * stmmac_poll - stmmac poll method (NAPI) + * @napi : pointer to the napi structure. + * @budget : maximum number of packets that the current CPU can receive from + * all interfaces. + * Description : + * This function implements the the reception process. 
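Both the TX reclaim loop in stmmac_tx() and the RX refill loop above use the same bookkeeping: cur_* counts entries handed to the hardware, dirty_* counts entries already reclaimed or refilled, and each counter is reduced modulo the ring size to get the descriptor index. A tiny standalone sketch of that arithmetic; the names are illustrative and ring_avail() follows the same idea as stmmac_tx_avail() rather than its exact formula.

    #include <stdio.h>

    #define RING_SIZE 256   /* e.g. dma_tx_size / dma_rx_size */

    struct ring {
            unsigned int cur;     /* next entry to hand to the DMA */
            unsigned int dirty;   /* next entry to reclaim/refill  */
    };

    /* Entries still available to the driver. */
    static unsigned int ring_avail(const struct ring *r)
    {
            return RING_SIZE - (r->cur - r->dirty);
    }

    int main(void)
    {
            struct ring r = { .cur = 0, .dirty = 0 };
            unsigned int i;

            /* "Transmit" five frames: each consumes entry cur % RING_SIZE. */
            for (i = 0; i < 5; i++)
                    printf("xmit  -> entry %u\n", r.cur++ % RING_SIZE);

            /* "Reclaim" them: walk dirty until it catches up with cur. */
            while (r.dirty != r.cur) {
                    unsigned int entry = r.dirty % RING_SIZE;

                    r.dirty++;
                    printf("clean -> entry %u (avail now %u)\n",
                           entry, ring_avail(&r));
            }
            return 0;
    }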
+ * Also it runs the TX completion thread + */ +static int stmmac_poll(struct napi_struct *napi, int budget) +{ + struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi); + int work_done = 0; + + priv->xstats.poll_n++; + stmmac_tx(priv); + work_done = stmmac_rx(priv, budget); + + if (work_done < budget) { + napi_complete(napi); + stmmac_enable_irq(priv); + } + return work_done; +} + +/** + * stmmac_tx_timeout + * @dev : Pointer to net device structure + * Description: this function is called when a packet transmission fails to + * complete within a reasonable tmrate. The driver will mark the error in the + * netdev structure and arrange for the device to be reset to a sane state + * in order to transmit a new packet. + */ +static void stmmac_tx_timeout(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + /* Clear Tx resources and restart transmitting again */ + stmmac_tx_err(priv); + return; +} + +/* Configuration changes (passed on by ifconfig) */ +static int stmmac_config(struct net_device *dev, struct ifmap *map) +{ + if (dev->flags & IFF_UP) /* can't act on a running interface */ + return -EBUSY; + + /* Don't allow changing the I/O address */ + if (map->base_addr != dev->base_addr) { + pr_warning("%s: can't change I/O address\n", dev->name); + return -EOPNOTSUPP; + } + + /* Don't allow changing the IRQ */ + if (map->irq != dev->irq) { + pr_warning("%s: can't change IRQ number %d\n", + dev->name, dev->irq); + return -EOPNOTSUPP; + } + + /* ignore other fields */ + return 0; +} + +/** + * stmmac_multicast_list - entry point for multicast addressing + * @dev : pointer to the device structure + * Description: + * This function is a driver entry point which gets called by the kernel + * whenever multicast addresses must be enabled/disabled. + * Return value: + * void. + */ +static void stmmac_multicast_list(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + spin_lock(&priv->lock); + priv->mac_type->ops->set_filter(dev); + spin_unlock(&priv->lock); + return; +} + +/** + * stmmac_change_mtu - entry point to change MTU size for the device. + * @dev : device pointer. + * @new_mtu : the new MTU size for the device. + * Description: the Maximum Transfer Unit (MTU) is used by the network layer + * to drive packet transmission. Ethernet has an MTU of 1500 octets + * (ETH_DATA_LEN). This value can be changed with ifconfig. + * Return value: + * 0 on success and an appropriate (-)ve integer as defined in errno.h + * file on failure. 
+ */ +static int stmmac_change_mtu(struct net_device *dev, int new_mtu) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int max_mtu; + + if (netif_running(dev)) { + pr_err("%s: must be stopped to change its MTU\n", dev->name); + return -EBUSY; + } + + if (priv->is_gmac) + max_mtu = JUMBO_LEN; + else + max_mtu = ETH_DATA_LEN; + + if ((new_mtu < 46) || (new_mtu > max_mtu)) { + pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu); + return -EINVAL; + } + + dev->mtu = new_mtu; + + return 0; +} + +static irqreturn_t stmmac_interrupt(int irq, void *dev_id) +{ + struct net_device *dev = (struct net_device *)dev_id; + struct stmmac_priv *priv = netdev_priv(dev); + + if (unlikely(!dev)) { + pr_err("%s: invalid dev pointer\n", __func__); + return IRQ_NONE; + } + + if (priv->is_gmac) { + unsigned long ioaddr = dev->base_addr; + /* To handle GMAC own interrupts */ + priv->mac_type->ops->host_irq_status(ioaddr); + } + stmmac_dma_interrupt(dev); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* Polling receive - used by NETCONSOLE and other diagnostic tools + * to allow network I/O with interrupts disabled. */ +static void stmmac_poll_controller(struct net_device *dev) +{ + disable_irq(dev->irq); + stmmac_interrupt(dev->irq, dev); + enable_irq(dev->irq); +} +#endif + +/** + * stmmac_ioctl - Entry point for the Ioctl + * @dev: Device pointer. + * @rq: An IOCTL specefic structure, that can contain a pointer to + * a proprietary structure used to pass information to the driver. + * @cmd: IOCTL command + * Description: + * Currently there are no special functionality supported in IOCTL, just the + * phy_mii_ioctl(...) can be invoked. + */ +static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int ret = -EOPNOTSUPP; + + if (!netif_running(dev)) + return -EINVAL; + + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + if (!priv->phydev) + return -EINVAL; + + spin_lock(&priv->lock); + ret = phy_mii_ioctl(priv->phydev, if_mii(rq), cmd); + spin_unlock(&priv->lock); + default: + break; + } + return ret; +} + +#ifdef STMMAC_VLAN_TAG_USED +static void stmmac_vlan_rx_register(struct net_device *dev, + struct vlan_group *grp) +{ + struct stmmac_priv *priv = netdev_priv(dev); + + DBG(probe, INFO, "%s: Setting vlgrp to %p\n", dev->name, grp); + + spin_lock(&priv->lock); + priv->vlgrp = grp; + spin_unlock(&priv->lock); + + return; +} +#endif + +static const struct net_device_ops stmmac_netdev_ops = { + .ndo_open = stmmac_open, + .ndo_start_xmit = stmmac_xmit, + .ndo_stop = stmmac_release, + .ndo_change_mtu = stmmac_change_mtu, + .ndo_set_multicast_list = stmmac_multicast_list, + .ndo_tx_timeout = stmmac_tx_timeout, + .ndo_do_ioctl = stmmac_ioctl, + .ndo_set_config = stmmac_config, +#ifdef STMMAC_VLAN_TAG_USED + .ndo_vlan_rx_register = stmmac_vlan_rx_register, +#endif +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = stmmac_poll_controller, +#endif + .ndo_set_mac_address = eth_mac_addr, +}; + +/** + * stmmac_probe - Initialization of the adapter . + * @dev : device pointer + * Description: The function initializes the network device structure for + * the STMMAC driver. It also calls the low level routines + * in order to init the HW (i.e. 
the DMA engine) + */ +static int stmmac_probe(struct net_device *dev) +{ + int ret = 0; + struct stmmac_priv *priv = netdev_priv(dev); + + ether_setup(dev); + + dev->netdev_ops = &stmmac_netdev_ops; + stmmac_set_ethtool_ops(dev); + + dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA); + dev->watchdog_timeo = msecs_to_jiffies(watchdog); +#ifdef STMMAC_VLAN_TAG_USED + /* Both mac100 and gmac support receive VLAN tag detection */ + dev->features |= NETIF_F_HW_VLAN_RX; +#endif + priv->msg_enable = netif_msg_init(debug, default_msg_level); + + if (priv->is_gmac) + priv->rx_csum = 1; + + if (flow_ctrl) + priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ + + priv->pause = pause; + netif_napi_add(dev, &priv->napi, stmmac_poll, 64); + + /* Get the MAC address */ + priv->mac_type->ops->get_umac_addr(dev->base_addr, dev->dev_addr, 0); + + if (!is_valid_ether_addr(dev->dev_addr)) + pr_warning("\tno valid MAC address;" + "please, use ifconfig or nwhwconfig!\n"); + + ret = register_netdev(dev); + if (ret) { + pr_err("%s: ERROR %i registering the device\n", + __func__, ret); + return -ENODEV; + } + + DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n", + dev->name, (dev->features & NETIF_F_SG) ? "on" : "off", + (dev->features & NETIF_F_HW_CSUM) ? "on" : "off"); + + spin_lock_init(&priv->lock); + + return ret; +} + +/** + * stmmac_mac_device_setup + * @dev : device pointer + * Description: select and initialise the mac device (mac100 or Gmac). + */ +static int stmmac_mac_device_setup(struct net_device *dev) +{ + struct stmmac_priv *priv = netdev_priv(dev); + unsigned long ioaddr = dev->base_addr; + + struct mac_device_info *device; + + if (priv->is_gmac) + device = gmac_setup(ioaddr); + else + device = mac100_setup(ioaddr); + + if (!device) + return -ENOMEM; + + priv->mac_type = device; + + priv->wolenabled = priv->mac_type->hw.pmt; /* PMT supported */ + if (priv->wolenabled == PMT_SUPPORTED) + priv->wolopts = WAKE_MAGIC; /* Magic Frame */ + + return 0; +} + +static int stmmacphy_dvr_probe(struct platform_device *pdev) +{ + struct plat_stmmacphy_data *plat_dat; + plat_dat = (struct plat_stmmacphy_data *)((pdev->dev).platform_data); + + pr_debug("stmmacphy_dvr_probe: added phy for bus %d\n", + plat_dat->bus_id); + + return 0; +} + +static int stmmacphy_dvr_remove(struct platform_device *pdev) +{ + return 0; +} + +static struct platform_driver stmmacphy_driver = { + .driver = { + .name = PHY_RESOURCE_NAME, + }, + .probe = stmmacphy_dvr_probe, + .remove = stmmacphy_dvr_remove, +}; + +/** + * stmmac_associate_phy + * @dev: pointer to device structure + * @data: points to the private structure. + * Description: Scans through all the PHYs we have registered and checks if + * any are associated with our MAC. If so, then just fill in + * the blanks in our local context structure + */ +static int stmmac_associate_phy(struct device *dev, void *data) +{ + struct stmmac_priv *priv = (struct stmmac_priv *)data; + struct plat_stmmacphy_data *plat_dat; + + plat_dat = (struct plat_stmmacphy_data *)(dev->platform_data); + + DBG(probe, DEBUG, "%s: checking phy for bus %d\n", __func__, + plat_dat->bus_id); + + /* Check that this phy is for the MAC being initialised */ + if (priv->bus_id != plat_dat->bus_id) + return 0; + + /* OK, this PHY is connected to the MAC. + Go ahead and get the parameters */ + DBG(probe, DEBUG, "%s: OK. 
Found PHY config\n", __func__); + priv->phy_irq = + platform_get_irq_byname(to_platform_device(dev), "phyirq"); + DBG(probe, DEBUG, "%s: PHY irq on bus %d is %d\n", __func__, + plat_dat->bus_id, priv->phy_irq); + + /* Override with kernel parameters if supplied XXX CRS XXX + * this needs to have multiple instances */ + if ((phyaddr >= 0) && (phyaddr <= 31)) + plat_dat->phy_addr = phyaddr; + + priv->phy_addr = plat_dat->phy_addr; + priv->phy_mask = plat_dat->phy_mask; + priv->phy_interface = plat_dat->interface; + priv->phy_reset = plat_dat->phy_reset; + + DBG(probe, DEBUG, "%s: exiting\n", __func__); + return 1; /* forces exit of driver_for_each_device() */ +} + +/** + * stmmac_dvr_probe + * @pdev: platform device pointer + * Description: the driver is initialized through platform_device. + */ +static int stmmac_dvr_probe(struct platform_device *pdev) +{ + int ret = 0; + struct resource *res; + unsigned int *addr = NULL; + struct net_device *ndev = NULL; + struct stmmac_priv *priv; + struct plat_stmmacenet_data *plat_dat; + + pr_info("STMMAC driver:\n\tplatform registration... "); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + ret = -ENODEV; + goto out; + } + pr_info("done!\n"); + + if (!request_mem_region(res->start, (res->end - res->start), + pdev->name)) { + pr_err("%s: ERROR: memory allocation failed" + "cannot get the I/O addr 0x%x\n", + __func__, (unsigned int)res->start); + ret = -EBUSY; + goto out; + } + + addr = ioremap(res->start, (res->end - res->start)); + if (!addr) { + pr_err("%s: ERROR: memory mapping failed \n", __func__); + ret = -ENOMEM; + goto out; + } + + ndev = alloc_etherdev(sizeof(struct stmmac_priv)); + if (!ndev) { + pr_err("%s: ERROR: allocating the device\n", __func__); + ret = -ENOMEM; + goto out; + } + + SET_NETDEV_DEV(ndev, &pdev->dev); + + /* Get the MAC information */ + ndev->irq = platform_get_irq_byname(pdev, "macirq"); + if (ndev->irq == -ENXIO) { + pr_err("%s: ERROR: MAC IRQ configuration " + "information not found\n", __func__); + ret = -ENODEV; + goto out; + } + + priv = netdev_priv(ndev); + priv->device = &(pdev->dev); + priv->dev = ndev; + plat_dat = (struct plat_stmmacenet_data *)((pdev->dev).platform_data); + priv->bus_id = plat_dat->bus_id; + priv->pbl = plat_dat->pbl; /* TLI */ + priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */ + + platform_set_drvdata(pdev, ndev); + + /* Set the I/O base addr */ + ndev->base_addr = (unsigned long)addr; + + /* MAC HW revice detection */ + ret = stmmac_mac_device_setup(ndev); + if (ret < 0) + goto out; + + /* Network Device Registration */ + ret = stmmac_probe(ndev); + if (ret < 0) + goto out; + + /* associate a PHY - it is provided by another platform bus */ + if (!driver_for_each_device + (&(stmmacphy_driver.driver), NULL, (void *)priv, + stmmac_associate_phy)) { + pr_err("No PHY device is associated with this MAC!\n"); + ret = -ENODEV; + goto out; + } + + priv->fix_mac_speed = plat_dat->fix_mac_speed; + priv->bsp_priv = plat_dat->bsp_priv; + + pr_info("\t%s - (dev. 
name: %s - id: %d, IRQ #%d\n" + "\tIO base addr: 0x%08x)\n", ndev->name, pdev->name, + pdev->id, ndev->irq, (unsigned int)addr); + + /* MDIO bus Registration */ + pr_debug("\tMDIO bus (id: %d)...", priv->bus_id); + ret = stmmac_mdio_register(ndev); + if (ret < 0) + goto out; + pr_debug("registered!\n"); + +out: + if (ret < 0) { + platform_set_drvdata(pdev, NULL); + release_mem_region(res->start, (res->end - res->start)); + if (addr != NULL) + iounmap(addr); + } + + return ret; +} + +/** + * stmmac_dvr_remove + * @pdev: platform device pointer + * Description: this function resets the TX/RX processes, disables the MAC RX/TX + * changes the link status, releases the DMA descriptor rings, + * unregisters the MDIO bus and unmaps the allocated memory. + */ +static int stmmac_dvr_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct resource *res; + + pr_info("%s:\n\tremoving driver", __func__); + + stmmac_dma_stop_rx(ndev->base_addr); + stmmac_dma_stop_tx(ndev->base_addr); + + stmmac_mac_disable_rx(ndev->base_addr); + stmmac_mac_disable_tx(ndev->base_addr); + + netif_carrier_off(ndev); + + stmmac_mdio_unregister(ndev); + + platform_set_drvdata(pdev, NULL); + unregister_netdev(ndev); + + iounmap((void *)ndev->base_addr); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(res->start, (res->end - res->start)); + + free_netdev(ndev); + + return 0; +} + +#ifdef CONFIG_PM +static int stmmac_suspend(struct platform_device *pdev, pm_message_t state) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct stmmac_priv *priv = netdev_priv(dev); + int dis_ic = 0; + + if (!dev || !netif_running(dev)) + return 0; + + spin_lock(&priv->lock); + + if (state.event == PM_EVENT_SUSPEND) { + netif_device_detach(dev); + netif_stop_queue(dev); + if (priv->phydev) + phy_stop(priv->phydev); + +#ifdef CONFIG_STMMAC_TIMER + priv->tm->timer_stop(); + dis_ic = 1; +#endif + napi_disable(&priv->napi); + + /* Stop TX/RX DMA */ + stmmac_dma_stop_tx(dev->base_addr); + stmmac_dma_stop_rx(dev->base_addr); + /* Clear the Rx/Tx descriptors */ + priv->mac_type->ops->init_rx_desc(priv->dma_rx, + priv->dma_rx_size, dis_ic); + priv->mac_type->ops->init_tx_desc(priv->dma_tx, + priv->dma_tx_size); + + stmmac_mac_disable_tx(dev->base_addr); + + if (device_may_wakeup(&(pdev->dev))) { + /* Enable Power down mode by programming the PMT regs */ + if (priv->wolenabled == PMT_SUPPORTED) + priv->mac_type->ops->pmt(dev->base_addr, + priv->wolopts); + } else { + stmmac_mac_disable_rx(dev->base_addr); + } + } else { + priv->shutdown = 1; + /* Although this can appear slightly redundant it actually + * makes fast the standby operation and guarantees the driver + * working if hibernation is on media. */ + stmmac_release(dev); + } + + spin_unlock(&priv->lock); + return 0; +} + +static int stmmac_resume(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct stmmac_priv *priv = netdev_priv(dev); + unsigned long ioaddr = dev->base_addr; + + if (!netif_running(dev)) + return 0; + + spin_lock(&priv->lock); + + if (priv->shutdown) { + /* Re-open the interface and re-init the MAC/DMA + and the rings. */ + stmmac_open(dev); + goto out_resume; + } + + /* Power Down bit, into the PM register, is cleared + * automatically as soon as a magic packet or a Wake-up frame + * is received. Anyway, it's better to manually clear + * this bit because it can generate problems while resuming + * from another devices (e.g. serial console). 
*/ + if (device_may_wakeup(&(pdev->dev))) + if (priv->wolenabled == PMT_SUPPORTED) + priv->mac_type->ops->pmt(dev->base_addr, 0); + + netif_device_attach(dev); + + /* Enable the MAC and DMA */ + stmmac_mac_enable_rx(ioaddr); + stmmac_mac_enable_tx(ioaddr); + stmmac_dma_start_tx(ioaddr); + stmmac_dma_start_rx(ioaddr); + +#ifdef CONFIG_STMMAC_TIMER + priv->tm->timer_start(tmrate); +#endif + napi_enable(&priv->napi); + + if (priv->phydev) + phy_start(priv->phydev); + + netif_start_queue(dev); + +out_resume: + spin_unlock(&priv->lock); + return 0; +} +#endif + +static struct platform_driver stmmac_driver = { + .driver = { + .name = STMMAC_RESOURCE_NAME, + }, + .probe = stmmac_dvr_probe, + .remove = stmmac_dvr_remove, +#ifdef CONFIG_PM + .suspend = stmmac_suspend, + .resume = stmmac_resume, +#endif + +}; + +/** + * stmmac_init_module - Entry point for the driver + * Description: This function is the entry point for the driver. + */ +static int __init stmmac_init_module(void) +{ + int ret; + + if (platform_driver_register(&stmmacphy_driver)) { + pr_err("No PHY devices registered!\n"); + return -ENODEV; + } + + ret = platform_driver_register(&stmmac_driver); + return ret; +} + +/** + * stmmac_cleanup_module - Cleanup routine for the driver + * Description: This function is the cleanup routine for the driver. + */ +static void __exit stmmac_cleanup_module(void) +{ + platform_driver_unregister(&stmmacphy_driver); + platform_driver_unregister(&stmmac_driver); +} + +#ifndef MODULE +static int __init stmmac_cmdline_opt(char *str) +{ + char *opt; + + if (!str || !*str) + return -EINVAL; + while ((opt = strsep(&str, ",")) != NULL) { + if (!strncmp(opt, "debug:", 6)) + strict_strtoul(opt + 6, 0, (unsigned long *)&debug); + else if (!strncmp(opt, "phyaddr:", 8)) + strict_strtoul(opt + 8, 0, (unsigned long *)&phyaddr); + else if (!strncmp(opt, "dma_txsize:", 11)) + strict_strtoul(opt + 11, 0, + (unsigned long *)&dma_txsize); + else if (!strncmp(opt, "dma_rxsize:", 11)) + strict_strtoul(opt + 11, 0, + (unsigned long *)&dma_rxsize); + else if (!strncmp(opt, "buf_sz:", 7)) + strict_strtoul(opt + 7, 0, (unsigned long *)&buf_sz); + else if (!strncmp(opt, "tc:", 3)) + strict_strtoul(opt + 3, 0, (unsigned long *)&tc); + else if (!strncmp(opt, "tx_coe:", 7)) + strict_strtoul(opt + 7, 0, (unsigned long *)&tx_coe); + else if (!strncmp(opt, "watchdog:", 9)) + strict_strtoul(opt + 9, 0, (unsigned long *)&watchdog); + else if (!strncmp(opt, "flow_ctrl:", 10)) + strict_strtoul(opt + 10, 0, + (unsigned long *)&flow_ctrl); + else if (!strncmp(opt, "pause:", 6)) + strict_strtoul(opt + 6, 0, (unsigned long *)&pause); +#ifdef CONFIG_STMMAC_TIMER + else if (!strncmp(opt, "tmrate:", 7)) + strict_strtoul(opt + 7, 0, (unsigned long *)&tmrate); +#endif + } + return 0; +} + +__setup("stmmaceth=", stmmac_cmdline_opt); +#endif + +module_init(stmmac_init_module); +module_exit(stmmac_cleanup_module); + +MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet driver"); +MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/stmmac/stmmac_mdio.c b/drivers/net/stmmac/stmmac_mdio.c new file mode 100644 index 00000000000..8498552a22f --- /dev/null +++ b/drivers/net/stmmac/stmmac_mdio.c @@ -0,0 +1,217 @@ +/******************************************************************************* + STMMAC Ethernet Driver -- MDIO bus implementation + Provides Bus interface for MII registers + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or 
modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Carl Shaw <carl.shaw@st.com> + Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com> +*******************************************************************************/ + +#include <linux/netdevice.h> +#include <linux/mii.h> +#include <linux/phy.h> + +#include "stmmac.h" + +#define MII_BUSY 0x00000001 +#define MII_WRITE 0x00000002 + +/** + * stmmac_mdio_read + * @bus: points to the mii_bus structure + * @phyaddr: MII addr reg bits 15-11 + * @phyreg: MII addr reg bits 10-6 + * Description: it reads data from the MII register from within the phy device. + * For the 7111 GMAC, we must set the bit 0 in the MII address register while + * accessing the PHY registers. + * Fortunately, it seems this has no drawback for the 7109 MAC. + */ +static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg) +{ + struct net_device *ndev = bus->priv; + struct stmmac_priv *priv = netdev_priv(ndev); + unsigned long ioaddr = ndev->base_addr; + unsigned int mii_address = priv->mac_type->hw.mii.addr; + unsigned int mii_data = priv->mac_type->hw.mii.data; + + int data; + u16 regValue = (((phyaddr << 11) & (0x0000F800)) | + ((phyreg << 6) & (0x000007C0))); + regValue |= MII_BUSY; /* in case of GMAC */ + + do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1); + writel(regValue, ioaddr + mii_address); + do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1); + + /* Read the data from the MII data register */ + data = (int)readl(ioaddr + mii_data); + + return data; +} + +/** + * stmmac_mdio_write + * @bus: points to the mii_bus structure + * @phyaddr: MII addr reg bits 15-11 + * @phyreg: MII addr reg bits 10-6 + * @phydata: phy data + * Description: it writes the data into the MII register from within the device. 
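As the kerneldoc above says, the PHY address occupies bits 15-11 of the MII address register and the register number bits 10-6, with bit 0 used as the busy flag (and bit 1 as the write flag in stmmac_mdio_write()). A small sketch of the value the read and write routines compose, using hypothetical phyaddr/phyreg inputs:

    #include <stdint.h>
    #include <stdio.h>

    #define MII_BUSY  0x00000001
    #define MII_WRITE 0x00000002

    /* Mirrors the expression in stmmac_mdio_read()/stmmac_mdio_write():
     * bits 15-11 = PHY address, bits 10-6 = register number. */
    static uint16_t mii_addr_value(int phyaddr, int phyreg, int write)
    {
            uint16_t v = ((phyaddr << 11) & 0x0000F800) |
                         ((phyreg  <<  6) & 0x000007C0);

            if (write)
                    v |= MII_WRITE;
            return v | MII_BUSY;    /* BUSY is always set to start the cycle */
    }

    int main(void)
    {
            /* e.g. PHY at address 1, reading register 1 (BMSR): prints 0x0841 */
            printf("0x%04x\n", mii_addr_value(1, 1, 0));
            return 0;
    }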
+ */ +static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, + u16 phydata) +{ + struct net_device *ndev = bus->priv; + struct stmmac_priv *priv = netdev_priv(ndev); + unsigned long ioaddr = ndev->base_addr; + unsigned int mii_address = priv->mac_type->hw.mii.addr; + unsigned int mii_data = priv->mac_type->hw.mii.data; + + u16 value = + (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0))) + | MII_WRITE; + + value |= MII_BUSY; + + /* Wait until any existing MII operation is complete */ + do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1); + + /* Set the MII address register to write */ + writel(phydata, ioaddr + mii_data); + writel(value, ioaddr + mii_address); + + /* Wait until any existing MII operation is complete */ + do {} while (((readl(ioaddr + mii_address)) & MII_BUSY) == 1); + + return 0; +} + +/** + * stmmac_mdio_reset + * @bus: points to the mii_bus structure + * Description: reset the MII bus + */ +static int stmmac_mdio_reset(struct mii_bus *bus) +{ + struct net_device *ndev = bus->priv; + struct stmmac_priv *priv = netdev_priv(ndev); + unsigned long ioaddr = ndev->base_addr; + unsigned int mii_address = priv->mac_type->hw.mii.addr; + + if (priv->phy_reset) { + pr_debug("stmmac_mdio_reset: calling phy_reset\n"); + priv->phy_reset(priv->bsp_priv); + } + + /* This is a workaround for problems with the STE101P PHY. + * It doesn't complete its reset until at least one clock cycle + * on MDC, so perform a dummy mdio read. + */ + writel(0, ioaddr + mii_address); + + return 0; +} + +/** + * stmmac_mdio_register + * @ndev: net device structure + * Description: it registers the MII bus + */ +int stmmac_mdio_register(struct net_device *ndev) +{ + int err = 0; + struct mii_bus *new_bus; + int *irqlist; + struct stmmac_priv *priv = netdev_priv(ndev); + int addr, found; + + new_bus = mdiobus_alloc(); + if (new_bus == NULL) + return -ENOMEM; + + irqlist = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); + if (irqlist == NULL) { + err = -ENOMEM; + goto irqlist_alloc_fail; + } + + /* Assign IRQ to phy at address phy_addr */ + if (priv->phy_addr != -1) + irqlist[priv->phy_addr] = priv->phy_irq; + + new_bus->name = "STMMAC MII Bus"; + new_bus->read = &stmmac_mdio_read; + new_bus->write = &stmmac_mdio_write; + new_bus->reset = &stmmac_mdio_reset; + snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", priv->bus_id); + new_bus->priv = ndev; + new_bus->irq = irqlist; + new_bus->phy_mask = priv->phy_mask; + new_bus->parent = priv->device; + err = mdiobus_register(new_bus); + if (err != 0) { + pr_err("%s: Cannot register as MDIO bus\n", new_bus->name); + goto bus_register_fail; + } + + priv->mii = new_bus; + + found = 0; + for (addr = 0; addr < 32; addr++) { + struct phy_device *phydev = new_bus->phy_map[addr]; + if (phydev) { + if (priv->phy_addr == -1) { + priv->phy_addr = addr; + phydev->irq = priv->phy_irq; + irqlist[addr] = priv->phy_irq; + } + pr_info("%s: PHY ID %08x at %d IRQ %d (%s)%s\n", + ndev->name, phydev->phy_id, addr, + phydev->irq, dev_name(&phydev->dev), + (addr == priv->phy_addr) ? 
" active" : ""); + found = 1; + } + } + + if (!found) + pr_warning("%s: No PHY found\n", ndev->name); + + return 0; +bus_register_fail: + kfree(irqlist); +irqlist_alloc_fail: + kfree(new_bus); + return err; +} + +/** + * stmmac_mdio_unregister + * @ndev: net device structure + * Description: it unregisters the MII bus + */ +int stmmac_mdio_unregister(struct net_device *ndev) +{ + struct stmmac_priv *priv = netdev_priv(ndev); + + mdiobus_unregister(priv->mii); + priv->mii->priv = NULL; + kfree(priv->mii); + + return 0; +} diff --git a/drivers/net/stmmac/stmmac_timer.c b/drivers/net/stmmac/stmmac_timer.c new file mode 100644 index 00000000000..b838c658207 --- /dev/null +++ b/drivers/net/stmmac/stmmac_timer.c @@ -0,0 +1,140 @@ +/******************************************************************************* + STMMAC external timer support. + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> +*******************************************************************************/ + +#include <linux/kernel.h> +#include <linux/etherdevice.h> +#include "stmmac_timer.h" + +static void stmmac_timer_handler(void *data) +{ + struct net_device *dev = (struct net_device *)data; + + stmmac_schedule(dev); + + return; +} + +#define STMMAC_TIMER_MSG(timer, freq) \ +printk(KERN_INFO "stmmac_timer: %s Timer ON (freq %dHz)\n", timer, freq); + +#if defined(CONFIG_STMMAC_RTC_TIMER) +#include <linux/rtc.h> +static struct rtc_device *stmmac_rtc; +static rtc_task_t stmmac_task; + +static void stmmac_rtc_start(unsigned int new_freq) +{ + rtc_irq_set_freq(stmmac_rtc, &stmmac_task, new_freq); + rtc_irq_set_state(stmmac_rtc, &stmmac_task, 1); + return; +} + +static void stmmac_rtc_stop(void) +{ + rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0); + return; +} + +int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm) +{ + stmmac_task.private_data = dev; + stmmac_task.func = stmmac_timer_handler; + + stmmac_rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE); + if (stmmac_rtc == NULL) { + pr_error("open rtc device failed\n"); + return -ENODEV; + } + + rtc_irq_register(stmmac_rtc, &stmmac_task); + + /* Periodic mode is not supported */ + if ((rtc_irq_set_freq(stmmac_rtc, &stmmac_task, tm->freq) < 0)) { + pr_error("set periodic failed\n"); + rtc_irq_unregister(stmmac_rtc, &stmmac_task); + rtc_class_close(stmmac_rtc); + return -1; + } + + STMMAC_TIMER_MSG(CONFIG_RTC_HCTOSYS_DEVICE, tm->freq); + + tm->timer_start = stmmac_rtc_start; + tm->timer_stop = stmmac_rtc_stop; + + return 0; +} + +int stmmac_close_ext_timer(void) +{ + rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0); + rtc_irq_unregister(stmmac_rtc, &stmmac_task); + rtc_class_close(stmmac_rtc); + return 0; +} + +#elif defined(CONFIG_STMMAC_TMU_TIMER) +#include 
<linux/clk.h> +#define TMU_CHANNEL "tmu2_clk" +static struct clk *timer_clock; + +static void stmmac_tmu_start(unsigned int new_freq) +{ + clk_set_rate(timer_clock, new_freq); + clk_enable(timer_clock); + return; +} + +static void stmmac_tmu_stop(void) +{ + clk_disable(timer_clock); + return; +} + +int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm) +{ + timer_clock = clk_get(NULL, TMU_CHANNEL); + + if (timer_clock == NULL) + return -1; + + if (tmu2_register_user(stmmac_timer_handler, (void *)dev) < 0) { + timer_clock = NULL; + return -1; + } + + STMMAC_TIMER_MSG("TMU2", tm->freq); + tm->timer_start = stmmac_tmu_start; + tm->timer_stop = stmmac_tmu_stop; + + return 0; +} + +int stmmac_close_ext_timer(void) +{ + clk_disable(timer_clock); + tmu2_unregister_user(); + clk_put(timer_clock); + return 0; +} +#endif diff --git a/drivers/net/stmmac/stmmac_timer.h b/drivers/net/stmmac/stmmac_timer.h new file mode 100644 index 00000000000..f795cae3372 --- /dev/null +++ b/drivers/net/stmmac/stmmac_timer.h @@ -0,0 +1,41 @@ +/******************************************************************************* + STMMAC external timer Header File. + + Copyright (C) 2007-2009 STMicroelectronics Ltd + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+ + Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> +*******************************************************************************/ + +struct stmmac_timer { + void (*timer_start) (unsigned int new_freq); + void (*timer_stop) (void); + unsigned int freq; +}; + +/* Open the HW timer device and return 0 in case of success */ +int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm); +/* Stop the timer and release it */ +int stmmac_close_ext_timer(void); +/* Function used for scheduling task within the stmmac */ +void stmmac_schedule(struct net_device *dev); + +#if defined(CONFIG_STMMAC_TMU_TIMER) +extern int tmu2_register_user(void *fnt, void *data); +extern void tmu2_unregister_user(void); +#endif diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c index 36cb2423bcf..75fa32e34fd 100644 --- a/drivers/net/tokenring/ibmtr.c +++ b/drivers/net/tokenring/ibmtr.c @@ -1144,9 +1144,16 @@ static void dir_open_adapter (struct net_device *dev) } else { char **prphase = printphase; char **prerror = printerror; + int pnr = err / 16 - 1; + int enr = err % 16 - 1; DPRINTK("TR Adapter misc open failure, error code = "); - printk("0x%x, Phase: %s, Error: %s\n", - err, prphase[err/16 -1], prerror[err%16 -1]); + if (pnr < 0 || pnr >= ARRAY_SIZE(printphase) || + enr < 0 || + enr >= ARRAY_SIZE(printerror)) + printk("0x%x, invalid Phase/Error.", err); + else + printk("0x%x, Phase: %s, Error: %s\n", err, + prphase[pnr], prerror[enr]); printk(" retrying after %ds delay...\n", TR_RETRY_INTERVAL/HZ); } diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index 72470f77f55..a2b30a10064 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c @@ -649,6 +649,10 @@ static const struct usb_device_id products[] = { USB_DEVICE(0x0fe6, 0x8101), /* DM9601 USB to Fast Ethernet Adapter */ .driver_info = (unsigned long)&dm9601_info, }, + { + USB_DEVICE(0x0a46, 0x9000), /* DM9000E */ + .driver_info = (unsigned long)&dm9601_info, + }, {}, // END }; diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index 6fdaba8674b..ed4a508ef26 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -62,8 +62,11 @@ static char *devid=NULL; static struct usb_eth_dev usb_dev_id[] = { #define PEGASUS_DEV(pn, vid, pid, flags) \ {.name = pn, .vendor = vid, .device = pid, .private = flags}, +#define PEGASUS_DEV_CLASS(pn, vid, pid, dclass, flags) \ + PEGASUS_DEV(pn, vid, pid, flags) #include "pegasus.h" #undef PEGASUS_DEV +#undef PEGASUS_DEV_CLASS {NULL, 0, 0, 0}, {NULL, 0, 0, 0} }; @@ -71,8 +74,18 @@ static struct usb_eth_dev usb_dev_id[] = { static struct usb_device_id pegasus_ids[] = { #define PEGASUS_DEV(pn, vid, pid, flags) \ {.match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = vid, .idProduct = pid}, +/* + * The Belkin F8T012xx1 bluetooth adaptor has the same vendor and product + * IDs as the Belkin F5D5050, so we need to teach the pegasus driver to + * ignore adaptors belonging to the "Wireless" class 0xE0. For this one + * case anyway, seeing as the pegasus is for "Wired" adaptors. 
+ */ +#define PEGASUS_DEV_CLASS(pn, vid, pid, dclass, flags) \ + {.match_flags = (USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_DEV_CLASS), \ + .idVendor = vid, .idProduct = pid, .bDeviceClass = dclass}, #include "pegasus.h" #undef PEGASUS_DEV +#undef PEGASUS_DEV_CLASS {}, {} }; diff --git a/drivers/net/usb/pegasus.h b/drivers/net/usb/pegasus.h index f968c834ff6..5d02f020073 100644 --- a/drivers/net/usb/pegasus.h +++ b/drivers/net/usb/pegasus.h @@ -202,7 +202,11 @@ PEGASUS_DEV( "AEI USB Fast Ethernet Adapter", VENDOR_AEILAB, 0x1701, DEFAULT_GPIO_RESET | PEGASUS_II ) PEGASUS_DEV( "Allied Telesyn Int. AT-USB100", VENDOR_ALLIEDTEL, 0xb100, DEFAULT_GPIO_RESET | PEGASUS_II ) -PEGASUS_DEV( "Belkin F5D5050 USB Ethernet", VENDOR_BELKIN, 0x0121, +/* + * Distinguish between this Belkin adaptor and the Belkin bluetooth adaptors + * with the same product IDs by checking the device class too. + */ +PEGASUS_DEV_CLASS( "Belkin F5D5050 USB Ethernet", VENDOR_BELKIN, 0x0121, 0x00, DEFAULT_GPIO_RESET | PEGASUS_II ) PEGASUS_DEV( "Billionton USB-100", VENDOR_BILLIONTON, 0x0986, DEFAULT_GPIO_RESET ) diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index 0caa8008c51..f56dec6119c 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c @@ -362,12 +362,12 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags) retval = -EINVAL; goto halt_fail_and_release; } - dev->hard_mtu = tmp; - net->mtu = dev->hard_mtu - net->hard_header_len; dev_warn(&intf->dev, "dev can't take %u byte packets (max %u), " "adjusting MTU to %u\n", - dev->hard_mtu, tmp, net->mtu); + dev->hard_mtu, tmp, tmp - net->hard_header_len); + dev->hard_mtu = tmp; + net->mtu = dev->hard_mtu - net->hard_header_len; } /* REVISIT: peripheral "alignment" request is ignored ... */ diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 8d009760277..05630f2f693 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -22,7 +22,6 @@ #include <linux/ethtool.h> #include <linux/module.h> #include <linux/virtio.h> -#include <linux/virtio_ids.h> #include <linux/virtio_net.h> #include <linux/scatterlist.h> #include <linux/if_vlan.h> @@ -454,7 +453,7 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi) vi->dev->stats.tx_bytes += skb->len; vi->dev->stats.tx_packets++; tot_sgs += skb_vnet_hdr(skb)->num_sg; - kfree_skb(skb); + dev_kfree_skb_any(skb); } return tot_sgs; } @@ -517,8 +516,7 @@ again: /* Free up any pending old buffers before queueing new ones. */ free_old_xmit_skbs(vi); - /* Put new one in send queue and do transmit */ - __skb_queue_head(&vi->send, skb); + /* Try to transmit */ capacity = xmit_skb(vi, skb); /* This can happen with OOM and indirect buffers. */ @@ -532,8 +530,17 @@ again: } return NETDEV_TX_BUSY; } - vi->svq->vq_ops->kick(vi->svq); + + /* + * Put new one in send queue. You'd expect we'd need this before + * xmit_skb calls add_buf(), since the callback can be triggered + * immediately after that. But since the callback just triggers + * another call back here, normal network xmit locking prevents the + * race. + */ + __skb_queue_head(&vi->send, skb); + /* Don't wait up for transmitted skbs to be freed. 
*/ skb_orphan(skb); nf_reset(skb); diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile new file mode 100644 index 00000000000..880f5098eac --- /dev/null +++ b/drivers/net/vmxnet3/Makefile @@ -0,0 +1,35 @@ +################################################################################ +# +# Linux driver for VMware's vmxnet3 ethernet NIC. +# +# Copyright (C) 2007-2009, VMware, Inc. All Rights Reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation; version 2 of the License and no later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or +# NON INFRINGEMENT. See the GNU General Public License for more +# details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# The full GNU General Public License is included in this distribution in +# the file called "COPYING". +# +# Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> +# +# +################################################################################ + +# +# Makefile for the VMware vmxnet3 ethernet NIC driver +# + +obj-$(CONFIG_VMXNET3) += vmxnet3.o + +vmxnet3-objs := vmxnet3_drv.o vmxnet3_ethtool.o diff --git a/drivers/net/vmxnet3/upt1_defs.h b/drivers/net/vmxnet3/upt1_defs.h new file mode 100644 index 00000000000..37108fb226d --- /dev/null +++ b/drivers/net/vmxnet3/upt1_defs.h @@ -0,0 +1,96 @@ +/* + * Linux driver for VMware's vmxnet3 ethernet NIC. + * + * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2 of the License and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ * + * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> + * + */ + +#ifndef _UPT1_DEFS_H +#define _UPT1_DEFS_H + +struct UPT1_TxStats { + u64 TSOPktsTxOK; /* TSO pkts post-segmentation */ + u64 TSOBytesTxOK; + u64 ucastPktsTxOK; + u64 ucastBytesTxOK; + u64 mcastPktsTxOK; + u64 mcastBytesTxOK; + u64 bcastPktsTxOK; + u64 bcastBytesTxOK; + u64 pktsTxError; + u64 pktsTxDiscard; +}; + +struct UPT1_RxStats { + u64 LROPktsRxOK; /* LRO pkts */ + u64 LROBytesRxOK; /* bytes from LRO pkts */ + /* the following counters are for pkts from the wire, i.e., pre-LRO */ + u64 ucastPktsRxOK; + u64 ucastBytesRxOK; + u64 mcastPktsRxOK; + u64 mcastBytesRxOK; + u64 bcastPktsRxOK; + u64 bcastBytesRxOK; + u64 pktsRxOutOfBuf; + u64 pktsRxError; +}; + +/* interrupt moderation level */ +enum { + UPT1_IML_NONE = 0, /* no interrupt moderation */ + UPT1_IML_HIGHEST = 7, /* least intr generated */ + UPT1_IML_ADAPTIVE = 8, /* adaptive intr moderation */ +}; +/* values for UPT1_RSSConf.hashType */ +enum { + UPT1_RSS_HASH_TYPE_NONE = 0x0, + UPT1_RSS_HASH_TYPE_IPV4 = 0x01, + UPT1_RSS_HASH_TYPE_TCP_IPV4 = 0x02, + UPT1_RSS_HASH_TYPE_IPV6 = 0x04, + UPT1_RSS_HASH_TYPE_TCP_IPV6 = 0x08, +}; + +enum { + UPT1_RSS_HASH_FUNC_NONE = 0x0, + UPT1_RSS_HASH_FUNC_TOEPLITZ = 0x01, +}; + +#define UPT1_RSS_MAX_KEY_SIZE 40 +#define UPT1_RSS_MAX_IND_TABLE_SIZE 128 + +struct UPT1_RSSConf { + u16 hashType; + u16 hashFunc; + u16 hashKeySize; + u16 indTableSize; + u8 hashKey[UPT1_RSS_MAX_KEY_SIZE]; + u8 indTable[UPT1_RSS_MAX_IND_TABLE_SIZE]; +}; + +/* features */ +enum { + UPT1_F_RXCSUM = 0x0001, /* rx csum verification */ + UPT1_F_RSS = 0x0002, + UPT1_F_RXVLAN = 0x0004, /* VLAN tag stripping */ + UPT1_F_LRO = 0x0008, +}; +#endif diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h new file mode 100644 index 00000000000..dc8ee4438a4 --- /dev/null +++ b/drivers/net/vmxnet3/vmxnet3_defs.h @@ -0,0 +1,535 @@ +/* + * Linux driver for VMware's vmxnet3 ethernet NIC. + * + * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2 of the License and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING".
+ * + * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> + * + */ + +#ifndef _VMXNET3_DEFS_H_ +#define _VMXNET3_DEFS_H_ + +#include "upt1_defs.h" + +/* all registers are 32 bit wide */ +/* BAR 1 */ +enum { + VMXNET3_REG_VRRS = 0x0, /* Vmxnet3 Revision Report Selection */ + VMXNET3_REG_UVRS = 0x8, /* UPT Version Report Selection */ + VMXNET3_REG_DSAL = 0x10, /* Driver Shared Address Low */ + VMXNET3_REG_DSAH = 0x18, /* Driver Shared Address High */ + VMXNET3_REG_CMD = 0x20, /* Command */ + VMXNET3_REG_MACL = 0x28, /* MAC Address Low */ + VMXNET3_REG_MACH = 0x30, /* MAC Address High */ + VMXNET3_REG_ICR = 0x38, /* Interrupt Cause Register */ + VMXNET3_REG_ECR = 0x40 /* Event Cause Register */ +}; + +/* BAR 0 */ +enum { + VMXNET3_REG_IMR = 0x0, /* Interrupt Mask Register */ + VMXNET3_REG_TXPROD = 0x600, /* Tx Producer Index */ + VMXNET3_REG_RXPROD = 0x800, /* Rx Producer Index for ring 1 */ + VMXNET3_REG_RXPROD2 = 0xA00 /* Rx Producer Index for ring 2 */ +}; + +#define VMXNET3_PT_REG_SIZE 4096 /* BAR 0 */ +#define VMXNET3_VD_REG_SIZE 4096 /* BAR 1 */ + +#define VMXNET3_REG_ALIGN 8 /* All registers are 8-byte aligned. */ +#define VMXNET3_REG_ALIGN_MASK 0x7 + +/* I/O Mapped access to registers */ +#define VMXNET3_IO_TYPE_PT 0 +#define VMXNET3_IO_TYPE_VD 1 +#define VMXNET3_IO_ADDR(type, reg) (((type) << 24) | ((reg) & 0xFFFFFF)) +#define VMXNET3_IO_TYPE(addr) ((addr) >> 24) +#define VMXNET3_IO_REG(addr) ((addr) & 0xFFFFFF) + +enum { + VMXNET3_CMD_FIRST_SET = 0xCAFE0000, + VMXNET3_CMD_ACTIVATE_DEV = VMXNET3_CMD_FIRST_SET, + VMXNET3_CMD_QUIESCE_DEV, + VMXNET3_CMD_RESET_DEV, + VMXNET3_CMD_UPDATE_RX_MODE, + VMXNET3_CMD_UPDATE_MAC_FILTERS, + VMXNET3_CMD_UPDATE_VLAN_FILTERS, + VMXNET3_CMD_UPDATE_RSSIDT, + VMXNET3_CMD_UPDATE_IML, + VMXNET3_CMD_UPDATE_PMCFG, + VMXNET3_CMD_UPDATE_FEATURE, + VMXNET3_CMD_LOAD_PLUGIN, + + VMXNET3_CMD_FIRST_GET = 0xF00D0000, + VMXNET3_CMD_GET_QUEUE_STATUS = VMXNET3_CMD_FIRST_GET, + VMXNET3_CMD_GET_STATS, + VMXNET3_CMD_GET_LINK, + VMXNET3_CMD_GET_PERM_MAC_LO, + VMXNET3_CMD_GET_PERM_MAC_HI, + VMXNET3_CMD_GET_DID_LO, + VMXNET3_CMD_GET_DID_HI, + VMXNET3_CMD_GET_DEV_EXTRA_INFO, + VMXNET3_CMD_GET_CONF_INTR +}; + +struct Vmxnet3_TxDesc { + u64 addr; + + u32 len:14; + u32 gen:1; /* generation bit */ + u32 rsvd:1; + u32 dtype:1; /* descriptor type */ + u32 ext1:1; + u32 msscof:14; /* MSS, checksum offset, flags */ + + u32 hlen:10; /* header len */ + u32 om:2; /* offload mode */ + u32 eop:1; /* End Of Packet */ + u32 cq:1; /* completion request */ + u32 ext2:1; + u32 ti:1; /* VLAN Tag Insertion */ + u32 tci:16; /* Tag to Insert */ +}; + +/* TxDesc.OM values */ +#define VMXNET3_OM_NONE 0 +#define VMXNET3_OM_CSUM 2 +#define VMXNET3_OM_TSO 3 + +/* fields in TxDesc we access w/o using bit fields */ +#define VMXNET3_TXD_EOP_SHIFT 12 +#define VMXNET3_TXD_CQ_SHIFT 13 +#define VMXNET3_TXD_GEN_SHIFT 14 + +#define VMXNET3_TXD_CQ (1 << VMXNET3_TXD_CQ_SHIFT) +#define VMXNET3_TXD_EOP (1 << VMXNET3_TXD_EOP_SHIFT) +#define VMXNET3_TXD_GEN (1 << VMXNET3_TXD_GEN_SHIFT) + +#define VMXNET3_HDR_COPY_SIZE 128 + + +struct Vmxnet3_TxDataDesc { + u8 data[VMXNET3_HDR_COPY_SIZE]; +}; + + +struct Vmxnet3_TxCompDesc { + u32 txdIdx:12; /* Index of the EOP TxDesc */ + u32 ext1:20; + + u32 ext2; + u32 ext3; + + u32 rsvd:24; + u32 type:7; /* completion type */ + u32 gen:1; /* generation bit */ +}; + + +struct Vmxnet3_RxDesc { + u64 addr; + + u32 len:14; + u32 btype:1; /* Buffer Type */ + u32 dtype:1; /* Descriptor type */ + u32 rsvd:15; + u32 gen:1; /* Generation bit */ + + u32 ext1; +}; + +/* values of 
RXD.BTYPE */ +#define VMXNET3_RXD_BTYPE_HEAD 0 /* head only */ +#define VMXNET3_RXD_BTYPE_BODY 1 /* body only */ + +/* fields in RxDesc we access w/o using bit fields */ +#define VMXNET3_RXD_BTYPE_SHIFT 14 +#define VMXNET3_RXD_GEN_SHIFT 31 + + +struct Vmxnet3_RxCompDesc { + u32 rxdIdx:12; /* Index of the RxDesc */ + u32 ext1:2; + u32 eop:1; /* End of Packet */ + u32 sop:1; /* Start of Packet */ + u32 rqID:10; /* rx queue/ring ID */ + u32 rssType:4; /* RSS hash type used */ + u32 cnc:1; /* Checksum Not Calculated */ + u32 ext2:1; + + u32 rssHash; /* RSS hash value */ + + u32 len:14; /* data length */ + u32 err:1; /* Error */ + u32 ts:1; /* Tag is stripped */ + u32 tci:16; /* Tag stripped */ + + u32 csum:16; + u32 tuc:1; /* TCP/UDP Checksum Correct */ + u32 udp:1; /* UDP packet */ + u32 tcp:1; /* TCP packet */ + u32 ipc:1; /* IP Checksum Correct */ + u32 v6:1; /* IPv6 */ + u32 v4:1; /* IPv4 */ + u32 frg:1; /* IP Fragment */ + u32 fcs:1; /* Frame CRC correct */ + u32 type:7; /* completion type */ + u32 gen:1; /* generation bit */ +}; + +/* fields in RxCompDesc we access via Vmxnet3_GenericDesc.dword[3] */ +#define VMXNET3_RCD_TUC_SHIFT 16 +#define VMXNET3_RCD_IPC_SHIFT 19 + +/* fields in RxCompDesc we access via Vmxnet3_GenericDesc.qword[1] */ +#define VMXNET3_RCD_TYPE_SHIFT 56 +#define VMXNET3_RCD_GEN_SHIFT 63 + +/* csum OK for TCP/UDP pkts over IP */ +#define VMXNET3_RCD_CSUM_OK (1 << VMXNET3_RCD_TUC_SHIFT | \ + 1 << VMXNET3_RCD_IPC_SHIFT) + +/* value of RxCompDesc.rssType */ +enum { + VMXNET3_RCD_RSS_TYPE_NONE = 0, + VMXNET3_RCD_RSS_TYPE_IPV4 = 1, + VMXNET3_RCD_RSS_TYPE_TCPIPV4 = 2, + VMXNET3_RCD_RSS_TYPE_IPV6 = 3, + VMXNET3_RCD_RSS_TYPE_TCPIPV6 = 4, +}; + + +/* a union for accessing all cmd/completion descriptors */ +union Vmxnet3_GenericDesc { + u64 qword[2]; + u32 dword[4]; + u16 word[8]; + struct Vmxnet3_TxDesc txd; + struct Vmxnet3_RxDesc rxd; + struct Vmxnet3_TxCompDesc tcd; + struct Vmxnet3_RxCompDesc rcd; +}; + +#define VMXNET3_INIT_GEN 1 + +/* Max size of a single tx buffer */ +#define VMXNET3_MAX_TX_BUF_SIZE (1 << 14) + +/* # of tx desc needed for a tx buffer size */ +#define VMXNET3_TXD_NEEDED(size) (((size) + VMXNET3_MAX_TX_BUF_SIZE - 1) / \ + VMXNET3_MAX_TX_BUF_SIZE) + +/* max # of tx descs for a non-tso pkt */ +#define VMXNET3_MAX_TXD_PER_PKT 16 + +/* Max size of a single rx buffer */ +#define VMXNET3_MAX_RX_BUF_SIZE ((1 << 14) - 1) +/* Minimum size of a type 0 buffer */ +#define VMXNET3_MIN_T0_BUF_SIZE 128 +#define VMXNET3_MAX_CSUM_OFFSET 1024 + +/* Ring base address alignment */ +#define VMXNET3_RING_BA_ALIGN 512 +#define VMXNET3_RING_BA_MASK (VMXNET3_RING_BA_ALIGN - 1) + +/* Ring size must be a multiple of 32 */ +#define VMXNET3_RING_SIZE_ALIGN 32 +#define VMXNET3_RING_SIZE_MASK (VMXNET3_RING_SIZE_ALIGN - 1) + +/* Max ring size */ +#define VMXNET3_TX_RING_MAX_SIZE 4096 +#define VMXNET3_TC_RING_MAX_SIZE 4096 +#define VMXNET3_RX_RING_MAX_SIZE 4096 +#define VMXNET3_RC_RING_MAX_SIZE 8192 + +/* a list of reasons for queue stop */ + +enum { + VMXNET3_ERR_NOEOP = 0x80000000, /* cannot find the EOP desc of a pkt */ + VMXNET3_ERR_TXD_REUSE = 0x80000001, /* reuse TxDesc before tx completion */ + VMXNET3_ERR_BIG_PKT = 0x80000002, /* too many TxDesc for a pkt */ + VMXNET3_ERR_DESC_NOT_SPT = 0x80000003, /* descriptor type not supported */ + VMXNET3_ERR_SMALL_BUF = 0x80000004, /* type 0 buffer too small */ + VMXNET3_ERR_STRESS = 0x80000005, /* stress option firing in vmkernel */ + VMXNET3_ERR_SWITCH = 0x80000006, /* mode switch failure */ + VMXNET3_ERR_TXD_INVALID = 0x80000007, /* 
invalid TxDesc */ +}; + +/* completion descriptor types */ +#define VMXNET3_CDTYPE_TXCOMP 0 /* Tx Completion Descriptor */ +#define VMXNET3_CDTYPE_RXCOMP 3 /* Rx Completion Descriptor */ + +enum { + VMXNET3_GOS_BITS_UNK = 0, /* unknown */ + VMXNET3_GOS_BITS_32 = 1, + VMXNET3_GOS_BITS_64 = 2, +}; + +#define VMXNET3_GOS_TYPE_LINUX 1 + + +struct Vmxnet3_GOSInfo { + u32 gosBits:2; /* 32-bit or 64-bit? */ + u32 gosType:4; /* which guest */ + u32 gosVer:16; /* gos version */ + u32 gosMisc:10; /* other info about gos */ +}; + + +struct Vmxnet3_DriverInfo { + u32 version; + struct Vmxnet3_GOSInfo gos; + u32 vmxnet3RevSpt; + u32 uptVerSpt; +}; + + +#define VMXNET3_REV1_MAGIC 0xbabefee1 + +/* + * QueueDescPA must be 128 bytes aligned. It points to an array of + * Vmxnet3_TxQueueDesc followed by an array of Vmxnet3_RxQueueDesc. + * The number of Vmxnet3_TxQueueDesc/Vmxnet3_RxQueueDesc are specified by + * Vmxnet3_MiscConf.numTxQueues/numRxQueues, respectively. + */ +#define VMXNET3_QUEUE_DESC_ALIGN 128 + + +struct Vmxnet3_MiscConf { + struct Vmxnet3_DriverInfo driverInfo; + u64 uptFeatures; + u64 ddPA; /* driver data PA */ + u64 queueDescPA; /* queue descriptor table PA */ + u32 ddLen; /* driver data len */ + u32 queueDescLen; /* queue desc. table len in bytes */ + u32 mtu; + u16 maxNumRxSG; + u8 numTxQueues; + u8 numRxQueues; + u32 reserved[4]; +}; + + +struct Vmxnet3_TxQueueConf { + u64 txRingBasePA; + u64 dataRingBasePA; + u64 compRingBasePA; + u64 ddPA; /* driver data */ + u64 reserved; + u32 txRingSize; /* # of tx desc */ + u32 dataRingSize; /* # of data desc */ + u32 compRingSize; /* # of comp desc */ + u32 ddLen; /* size of driver data */ + u8 intrIdx; + u8 _pad[7]; +}; + + +struct Vmxnet3_RxQueueConf { + u64 rxRingBasePA[2]; + u64 compRingBasePA; + u64 ddPA; /* driver data */ + u64 reserved; + u32 rxRingSize[2]; /* # of rx desc */ + u32 compRingSize; /* # of rx comp desc */ + u32 ddLen; /* size of driver data */ + u8 intrIdx; + u8 _pad[7]; +}; + + +enum vmxnet3_intr_mask_mode { + VMXNET3_IMM_AUTO = 0, + VMXNET3_IMM_ACTIVE = 1, + VMXNET3_IMM_LAZY = 2 +}; + +enum vmxnet3_intr_type { + VMXNET3_IT_AUTO = 0, + VMXNET3_IT_INTX = 1, + VMXNET3_IT_MSI = 2, + VMXNET3_IT_MSIX = 3 +}; + +#define VMXNET3_MAX_TX_QUEUES 8 +#define VMXNET3_MAX_RX_QUEUES 16 +/* addition 1 for events */ +#define VMXNET3_MAX_INTRS 25 + + +struct Vmxnet3_IntrConf { + bool autoMask; + u8 numIntrs; /* # of interrupts */ + u8 eventIntrIdx; + u8 modLevels[VMXNET3_MAX_INTRS]; /* moderation level for + * each intr */ + u32 reserved[3]; +}; + +/* one bit per VLAN ID, the size is in the units of u32 */ +#define VMXNET3_VFT_SIZE (4096 / (sizeof(u32) * 8)) + + +struct Vmxnet3_QueueStatus { + bool stopped; + u8 _pad[3]; + u32 error; +}; + + +struct Vmxnet3_TxQueueCtrl { + u32 txNumDeferred; + u32 txThreshold; + u64 reserved; +}; + + +struct Vmxnet3_RxQueueCtrl { + bool updateRxProd; + u8 _pad[7]; + u64 reserved; +}; + +enum { + VMXNET3_RXM_UCAST = 0x01, /* unicast only */ + VMXNET3_RXM_MCAST = 0x02, /* multicast passing the filters */ + VMXNET3_RXM_BCAST = 0x04, /* broadcast only */ + VMXNET3_RXM_ALL_MULTI = 0x08, /* all multicast */ + VMXNET3_RXM_PROMISC = 0x10 /* promiscuous */ +}; + +struct Vmxnet3_RxFilterConf { + u32 rxMode; /* VMXNET3_RXM_xxx */ + u16 mfTableLen; /* size of the multicast filter table */ + u16 _pad1; + u64 mfTablePA; /* PA of the multicast filters table */ + u32 vfTable[VMXNET3_VFT_SIZE]; /* vlan filter */ +}; + + +#define VMXNET3_PM_MAX_FILTERS 6 +#define VMXNET3_PM_MAX_PATTERN_SIZE 128 +#define 
VMXNET3_PM_MAX_MASK_SIZE (VMXNET3_PM_MAX_PATTERN_SIZE / 8) + +#define VMXNET3_PM_WAKEUP_MAGIC 0x01 /* wake up on magic pkts */ +#define VMXNET3_PM_WAKEUP_FILTER 0x02 /* wake up on pkts matching + * filters */ + + +struct Vmxnet3_PM_PktFilter { + u8 maskSize; + u8 patternSize; + u8 mask[VMXNET3_PM_MAX_MASK_SIZE]; + u8 pattern[VMXNET3_PM_MAX_PATTERN_SIZE]; + u8 pad[6]; +}; + + +struct Vmxnet3_PMConf { + u16 wakeUpEvents; /* VMXNET3_PM_WAKEUP_xxx */ + u8 numFilters; + u8 pad[5]; + struct Vmxnet3_PM_PktFilter filters[VMXNET3_PM_MAX_FILTERS]; +}; + + +struct Vmxnet3_VariableLenConfDesc { + u32 confVer; + u32 confLen; + u64 confPA; +}; + + +struct Vmxnet3_TxQueueDesc { + struct Vmxnet3_TxQueueCtrl ctrl; + struct Vmxnet3_TxQueueConf conf; + + /* Driver read after a GET command */ + struct Vmxnet3_QueueStatus status; + struct UPT1_TxStats stats; + u8 _pad[88]; /* 128 aligned */ +}; + + +struct Vmxnet3_RxQueueDesc { + struct Vmxnet3_RxQueueCtrl ctrl; + struct Vmxnet3_RxQueueConf conf; + /* Driver read after a GET command */ + struct Vmxnet3_QueueStatus status; + struct UPT1_RxStats stats; + u8 __pad[88]; /* 128 aligned */ +}; + + +struct Vmxnet3_DSDevRead { + /* read-only region for device, read by dev in response to a SET cmd */ + struct Vmxnet3_MiscConf misc; + struct Vmxnet3_IntrConf intrConf; + struct Vmxnet3_RxFilterConf rxFilterConf; + struct Vmxnet3_VariableLenConfDesc rssConfDesc; + struct Vmxnet3_VariableLenConfDesc pmConfDesc; + struct Vmxnet3_VariableLenConfDesc pluginConfDesc; +}; + +/* All structures in DriverShared are padded to multiples of 8 bytes */ +struct Vmxnet3_DriverShared { + u32 magic; + /* make devRead start at 64bit boundaries */ + u32 pad; + struct Vmxnet3_DSDevRead devRead; + u32 ecr; + u32 reserved[5]; +}; + + +#define VMXNET3_ECR_RQERR (1 << 0) +#define VMXNET3_ECR_TQERR (1 << 1) +#define VMXNET3_ECR_LINK (1 << 2) +#define VMXNET3_ECR_DIC (1 << 3) +#define VMXNET3_ECR_DEBUG (1 << 4) + +/* flip the gen bit of a ring */ +#define VMXNET3_FLIP_RING_GEN(gen) ((gen) = (gen) ^ 0x1) + +/* only use this if moving the idx won't affect the gen bit */ +#define VMXNET3_INC_RING_IDX_ONLY(idx, ring_size) \ + do {\ + (idx)++;\ + if (unlikely((idx) == (ring_size))) {\ + (idx) = 0;\ + } \ + } while (0) + +#define VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid) \ + (vfTable[vid >> 5] |= (1 << (vid & 31))) +#define VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid) \ + (vfTable[vid >> 5] &= ~(1 << (vid & 31))) + +#define VMXNET3_VFTABLE_ENTRY_IS_SET(vfTable, vid) \ + ((vfTable[vid >> 5] & (1 << (vid & 31))) != 0) + +#define VMXNET3_MAX_MTU 9000 +#define VMXNET3_MIN_MTU 60 + +#define VMXNET3_LINK_UP (10000 << 16 | 1) /* 10 Gbps, up */ +#define VMXNET3_LINK_DOWN 0 + +#endif /* _VMXNET3_DEFS_H_ */ diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c new file mode 100644 index 00000000000..004353a46af --- /dev/null +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -0,0 +1,2574 @@ +/* + * Linux driver for VMware's vmxnet3 ethernet NIC. + * + * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2 of the License and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT.
See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> + * + */ + +#include "vmxnet3_int.h" + +char vmxnet3_driver_name[] = "vmxnet3"; +#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver" + + +/* + * PCI Device ID Table + * Last entry must be all 0s + */ +static const struct pci_device_id vmxnet3_pciid_table[] = { + {PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)}, + {0} +}; + +MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table); + +static atomic_t devices_found; + + +/* + * Enable/Disable the given intr + */ +static void +vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx) +{ + VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0); +} + + +static void +vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx) +{ + VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1); +} + + +/* + * Enable/Disable all intrs used by the device + */ +static void +vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->intr.num_intrs; i++) + vmxnet3_enable_intr(adapter, i); +} + + +static void +vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->intr.num_intrs; i++) + vmxnet3_disable_intr(adapter, i); +} + + +static void +vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events) +{ + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events); +} + + +static bool +vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) +{ + return netif_queue_stopped(adapter->netdev); +} + + +static void +vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) +{ + tq->stopped = false; + netif_start_queue(adapter->netdev); +} + + +static void +vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) +{ + tq->stopped = false; + netif_wake_queue(adapter->netdev); +} + + +static void +vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) +{ + tq->stopped = true; + tq->num_stop++; + netif_stop_queue(adapter->netdev); +} + + +/* + * Check the link state. This may start or stop the tx queue. + */ +static void +vmxnet3_check_link(struct vmxnet3_adapter *adapter) +{ + u32 ret; + + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK); + ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); + adapter->link_speed = ret >> 16; + if (ret & 1) { /* Link is up. 
*/ + printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n", + adapter->netdev->name, adapter->link_speed); + if (!netif_carrier_ok(adapter->netdev)) + netif_carrier_on(adapter->netdev); + + vmxnet3_tq_start(&adapter->tx_queue, adapter); + } else { + printk(KERN_INFO "%s: NIC Link is Down\n", + adapter->netdev->name); + if (netif_carrier_ok(adapter->netdev)) + netif_carrier_off(adapter->netdev); + + vmxnet3_tq_stop(&adapter->tx_queue, adapter); + } +} + + +static void +vmxnet3_process_events(struct vmxnet3_adapter *adapter) +{ + u32 events = adapter->shared->ecr; + if (!events) + return; + + vmxnet3_ack_events(adapter, events); + + /* Check if link state has changed */ + if (events & VMXNET3_ECR_LINK) + vmxnet3_check_link(adapter); + + /* Check if there is an error on xmit/recv queues */ + if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) { + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_GET_QUEUE_STATUS); + + if (adapter->tqd_start->status.stopped) { + printk(KERN_ERR "%s: tq error 0x%x\n", + adapter->netdev->name, + adapter->tqd_start->status.error); + } + if (adapter->rqd_start->status.stopped) { + printk(KERN_ERR "%s: rq error 0x%x\n", + adapter->netdev->name, + adapter->rqd_start->status.error); + } + + schedule_work(&adapter->work); + } +} + + +static void +vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi, + struct pci_dev *pdev) +{ + if (tbi->map_type == VMXNET3_MAP_SINGLE) + pci_unmap_single(pdev, tbi->dma_addr, tbi->len, + PCI_DMA_TODEVICE); + else if (tbi->map_type == VMXNET3_MAP_PAGE) + pci_unmap_page(pdev, tbi->dma_addr, tbi->len, + PCI_DMA_TODEVICE); + else + BUG_ON(tbi->map_type != VMXNET3_MAP_NONE); + + tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */ +} + + +static int +vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq, + struct pci_dev *pdev, struct vmxnet3_adapter *adapter) +{ + struct sk_buff *skb; + int entries = 0; + + /* no out of order completion */ + BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp); + BUG_ON(tq->tx_ring.base[eop_idx].txd.eop != 1); + + skb = tq->buf_info[eop_idx].skb; + BUG_ON(skb == NULL); + tq->buf_info[eop_idx].skb = NULL; + + VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size); + + while (tq->tx_ring.next2comp != eop_idx) { + vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp, + pdev); + + /* update next2comp w/o tx_lock. Since we are marking more, + * instead of less, tx ring entries avail, the worst case is + * that the tx routine incorrectly re-queues a pkt due to + * insufficient tx ring entries. 
+ */ + vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring); + entries++; + } + + dev_kfree_skb_any(skb); + return entries; +} + + +static int +vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq, + struct vmxnet3_adapter *adapter) +{ + int completed = 0; + union Vmxnet3_GenericDesc *gdesc; + + gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; + while (gdesc->tcd.gen == tq->comp_ring.gen) { + completed += vmxnet3_unmap_pkt(gdesc->tcd.txdIdx, tq, + adapter->pdev, adapter); + + vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring); + gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; + } + + if (completed) { + spin_lock(&tq->tx_lock); + if (unlikely(vmxnet3_tq_stopped(tq, adapter) && + vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) > + VMXNET3_WAKE_QUEUE_THRESHOLD(tq) && + netif_carrier_ok(adapter->netdev))) { + vmxnet3_tq_wake(tq, adapter); + } + spin_unlock(&tq->tx_lock); + } + return completed; +} + + +static void +vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq, + struct vmxnet3_adapter *adapter) +{ + int i; + + while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) { + struct vmxnet3_tx_buf_info *tbi; + union Vmxnet3_GenericDesc *gdesc; + + tbi = tq->buf_info + tq->tx_ring.next2comp; + gdesc = tq->tx_ring.base + tq->tx_ring.next2comp; + + vmxnet3_unmap_tx_buf(tbi, adapter->pdev); + if (tbi->skb) { + dev_kfree_skb_any(tbi->skb); + tbi->skb = NULL; + } + vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring); + } + + /* sanity check, verify all buffers are indeed unmapped and freed */ + for (i = 0; i < tq->tx_ring.size; i++) { + BUG_ON(tq->buf_info[i].skb != NULL || + tq->buf_info[i].map_type != VMXNET3_MAP_NONE); + } + + tq->tx_ring.gen = VMXNET3_INIT_GEN; + tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0; + + tq->comp_ring.gen = VMXNET3_INIT_GEN; + tq->comp_ring.next2proc = 0; +} + + +void +vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq, + struct vmxnet3_adapter *adapter) +{ + if (tq->tx_ring.base) { + pci_free_consistent(adapter->pdev, tq->tx_ring.size * + sizeof(struct Vmxnet3_TxDesc), + tq->tx_ring.base, tq->tx_ring.basePA); + tq->tx_ring.base = NULL; + } + if (tq->data_ring.base) { + pci_free_consistent(adapter->pdev, tq->data_ring.size * + sizeof(struct Vmxnet3_TxDataDesc), + tq->data_ring.base, tq->data_ring.basePA); + tq->data_ring.base = NULL; + } + if (tq->comp_ring.base) { + pci_free_consistent(adapter->pdev, tq->comp_ring.size * + sizeof(struct Vmxnet3_TxCompDesc), + tq->comp_ring.base, tq->comp_ring.basePA); + tq->comp_ring.base = NULL; + } + kfree(tq->buf_info); + tq->buf_info = NULL; +} + + +static void +vmxnet3_tq_init(struct vmxnet3_tx_queue *tq, + struct vmxnet3_adapter *adapter) +{ + int i; + + /* reset the tx ring contents to 0 and reset the tx ring states */ + memset(tq->tx_ring.base, 0, tq->tx_ring.size * + sizeof(struct Vmxnet3_TxDesc)); + tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0; + tq->tx_ring.gen = VMXNET3_INIT_GEN; + + memset(tq->data_ring.base, 0, tq->data_ring.size * + sizeof(struct Vmxnet3_TxDataDesc)); + + /* reset the tx comp ring contents to 0 and reset comp ring states */ + memset(tq->comp_ring.base, 0, tq->comp_ring.size * + sizeof(struct Vmxnet3_TxCompDesc)); + tq->comp_ring.next2proc = 0; + tq->comp_ring.gen = VMXNET3_INIT_GEN; + + /* reset the bookkeeping data */ + memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size); + for (i = 0; i < tq->tx_ring.size; i++) + tq->buf_info[i].map_type = VMXNET3_MAP_NONE; + + /* stats are not reset */ +} + + +static int +vmxnet3_tq_create(struct vmxnet3_tx_queue *tq, + struct vmxnet3_adapter *adapter) +{ + 
BUG_ON(tq->tx_ring.base || tq->data_ring.base || + tq->comp_ring.base || tq->buf_info); + + tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size + * sizeof(struct Vmxnet3_TxDesc), + &tq->tx_ring.basePA); + if (!tq->tx_ring.base) { + printk(KERN_ERR "%s: failed to allocate tx ring\n", + adapter->netdev->name); + goto err; + } + + tq->data_ring.base = pci_alloc_consistent(adapter->pdev, + tq->data_ring.size * + sizeof(struct Vmxnet3_TxDataDesc), + &tq->data_ring.basePA); + if (!tq->data_ring.base) { + printk(KERN_ERR "%s: failed to allocate data ring\n", + adapter->netdev->name); + goto err; + } + + tq->comp_ring.base = pci_alloc_consistent(adapter->pdev, + tq->comp_ring.size * + sizeof(struct Vmxnet3_TxCompDesc), + &tq->comp_ring.basePA); + if (!tq->comp_ring.base) { + printk(KERN_ERR "%s: failed to allocate tx comp ring\n", + adapter->netdev->name); + goto err; + } + + tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]), + GFP_KERNEL); + if (!tq->buf_info) { + printk(KERN_ERR "%s: failed to allocate tx bufinfo\n", + adapter->netdev->name); + goto err; + } + + return 0; + +err: + vmxnet3_tq_destroy(tq, adapter); + return -ENOMEM; +} + + +/* + * starting from ring->next2fill, allocate rx buffers for the given ring + * of the rx queue and update the rx desc. stop after @num_to_alloc buffers + * are allocated or allocation fails + */ + +static int +vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, + int num_to_alloc, struct vmxnet3_adapter *adapter) +{ + int num_allocated = 0; + struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx]; + struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx]; + u32 val; + + while (num_allocated < num_to_alloc) { + struct vmxnet3_rx_buf_info *rbi; + union Vmxnet3_GenericDesc *gd; + + rbi = rbi_base + ring->next2fill; + gd = ring->base + ring->next2fill; + + if (rbi->buf_type == VMXNET3_RX_BUF_SKB) { + if (rbi->skb == NULL) { + rbi->skb = dev_alloc_skb(rbi->len + + NET_IP_ALIGN); + if (unlikely(rbi->skb == NULL)) { + rq->stats.rx_buf_alloc_failure++; + break; + } + rbi->skb->dev = adapter->netdev; + + skb_reserve(rbi->skb, NET_IP_ALIGN); + rbi->dma_addr = pci_map_single(adapter->pdev, + rbi->skb->data, rbi->len, + PCI_DMA_FROMDEVICE); + } else { + /* rx buffer skipped by the device */ + } + val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT; + } else { + BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE || + rbi->len != PAGE_SIZE); + + if (rbi->page == NULL) { + rbi->page = alloc_page(GFP_ATOMIC); + if (unlikely(rbi->page == NULL)) { + rq->stats.rx_buf_alloc_failure++; + break; + } + rbi->dma_addr = pci_map_page(adapter->pdev, + rbi->page, 0, PAGE_SIZE, + PCI_DMA_FROMDEVICE); + } else { + /* rx buffers skipped by the device */ + } + val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT; + } + + BUG_ON(rbi->dma_addr == 0); + gd->rxd.addr = rbi->dma_addr; + gd->dword[2] = (ring->gen << VMXNET3_RXD_GEN_SHIFT) | val | + rbi->len; + + num_allocated++; + vmxnet3_cmd_ring_adv_next2fill(ring); + } + rq->uncommitted[ring_idx] += num_allocated; + + dev_dbg(&adapter->netdev->dev, + "alloc_rx_buf: %d allocated, next2fill %u, next2comp " + "%u, uncommitted %u\n", num_allocated, ring->next2fill, + ring->next2comp, rq->uncommitted[ring_idx]); + + /* so that the device can distinguish a full ring and an empty ring */ + BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp); + + return num_allocated; +} + + +static void +vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd, + struct
vmxnet3_rx_buf_info *rbi) +{ + struct skb_frag_struct *frag = skb_shinfo(skb)->frags + + skb_shinfo(skb)->nr_frags; + + BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS); + + frag->page = rbi->page; + frag->page_offset = 0; + frag->size = rcd->len; + skb->data_len += frag->size; + skb_shinfo(skb)->nr_frags++; +} + + +static void +vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, + struct vmxnet3_tx_queue *tq, struct pci_dev *pdev, + struct vmxnet3_adapter *adapter) +{ + u32 dw2, len; + unsigned long buf_offset; + int i; + union Vmxnet3_GenericDesc *gdesc; + struct vmxnet3_tx_buf_info *tbi = NULL; + + BUG_ON(ctx->copy_size > skb_headlen(skb)); + + /* use the previous gen bit for the SOP desc */ + dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT; + + ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill; + gdesc = ctx->sop_txd; /* both loops below can be skipped */ + + /* no need to map the buffer if headers are copied */ + if (ctx->copy_size) { + ctx->sop_txd->txd.addr = tq->data_ring.basePA + + tq->tx_ring.next2fill * + sizeof(struct Vmxnet3_TxDataDesc); + ctx->sop_txd->dword[2] = dw2 | ctx->copy_size; + ctx->sop_txd->dword[3] = 0; + + tbi = tq->buf_info + tq->tx_ring.next2fill; + tbi->map_type = VMXNET3_MAP_NONE; + + dev_dbg(&adapter->netdev->dev, + "txd[%u]: 0x%Lx 0x%x 0x%x\n", + tq->tx_ring.next2fill, ctx->sop_txd->txd.addr, + ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]); + vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); + + /* use the right gen for non-SOP desc */ + dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; + } + + /* linear part can use multiple tx desc if it's big */ + len = skb_headlen(skb) - ctx->copy_size; + buf_offset = ctx->copy_size; + while (len) { + u32 buf_size; + + buf_size = len > VMXNET3_MAX_TX_BUF_SIZE ? 
+ VMXNET3_MAX_TX_BUF_SIZE : len; + + tbi = tq->buf_info + tq->tx_ring.next2fill; + tbi->map_type = VMXNET3_MAP_SINGLE; + tbi->dma_addr = pci_map_single(adapter->pdev, + skb->data + buf_offset, buf_size, + PCI_DMA_TODEVICE); + + tbi->len = buf_size; /* this automatically convert 2^14 to 0 */ + + gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; + BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); + + gdesc->txd.addr = tbi->dma_addr; + gdesc->dword[2] = dw2 | buf_size; + gdesc->dword[3] = 0; + + dev_dbg(&adapter->netdev->dev, + "txd[%u]: 0x%Lx 0x%x 0x%x\n", + tq->tx_ring.next2fill, gdesc->txd.addr, + gdesc->dword[2], gdesc->dword[3]); + vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); + dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; + + len -= buf_size; + buf_offset += buf_size; + } + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + + tbi = tq->buf_info + tq->tx_ring.next2fill; + tbi->map_type = VMXNET3_MAP_PAGE; + tbi->dma_addr = pci_map_page(adapter->pdev, frag->page, + frag->page_offset, frag->size, + PCI_DMA_TODEVICE); + + tbi->len = frag->size; + + gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; + BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); + + gdesc->txd.addr = tbi->dma_addr; + gdesc->dword[2] = dw2 | frag->size; + gdesc->dword[3] = 0; + + dev_dbg(&adapter->netdev->dev, + "txd[%u]: 0x%llu %u %u\n", + tq->tx_ring.next2fill, gdesc->txd.addr, + gdesc->dword[2], gdesc->dword[3]); + vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); + dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; + } + + ctx->eop_txd = gdesc; + + /* set the last buf_info for the pkt */ + tbi->skb = skb; + tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base; +} + + +/* + * parse and copy relevant protocol headers: + * For a tso pkt, relevant headers are L2/3/4 including options + * For a pkt requesting csum offloading, they are L2/3 and may include L4 + * if it's a TCP/UDP pkt + * + * Returns: + * -1: error happens during parsing + * 0: protocol headers parsed, but too big to be copied + * 1: protocol headers parsed and copied + * + * Other effects: + * 1. related *ctx fields are updated. + * 2. ctx->copy_size is # of bytes copied + * 3. 
the portion copied is guaranteed to be in the linear part + * + */ +static int +vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, + struct vmxnet3_tx_ctx *ctx, + struct vmxnet3_adapter *adapter) +{ + struct Vmxnet3_TxDataDesc *tdd; + + if (ctx->mss) { + ctx->eth_ip_hdr_size = skb_transport_offset(skb); + ctx->l4_hdr_size = ((struct tcphdr *) + skb_transport_header(skb))->doff * 4; + ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size; + } else { + unsigned int pull_size; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + ctx->eth_ip_hdr_size = skb_transport_offset(skb); + + if (ctx->ipv4) { + struct iphdr *iph = (struct iphdr *) + skb_network_header(skb); + if (iph->protocol == IPPROTO_TCP) { + pull_size = ctx->eth_ip_hdr_size + + sizeof(struct tcphdr); + + if (unlikely(!pskb_may_pull(skb, + pull_size))) { + goto err; + } + ctx->l4_hdr_size = ((struct tcphdr *) + skb_transport_header(skb))->doff * 4; + } else if (iph->protocol == IPPROTO_UDP) { + ctx->l4_hdr_size = + sizeof(struct udphdr); + } else { + ctx->l4_hdr_size = 0; + } + } else { + /* for simplicity, don't copy L4 headers */ + ctx->l4_hdr_size = 0; + } + ctx->copy_size = ctx->eth_ip_hdr_size + + ctx->l4_hdr_size; + } else { + ctx->eth_ip_hdr_size = 0; + ctx->l4_hdr_size = 0; + /* copy as much as allowed */ + ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE + , skb_headlen(skb)); + } + + /* make sure headers are accessible directly */ + if (unlikely(!pskb_may_pull(skb, ctx->copy_size))) + goto err; + } + + if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) { + tq->stats.oversized_hdr++; + ctx->copy_size = 0; + return 0; + } + + tdd = tq->data_ring.base + tq->tx_ring.next2fill; + + memcpy(tdd->data, skb->data, ctx->copy_size); + dev_dbg(&adapter->netdev->dev, + "copy %u bytes to dataRing[%u]\n", + ctx->copy_size, tq->tx_ring.next2fill); + return 1; + +err: + return -1; +} + + +static void +vmxnet3_prepare_tso(struct sk_buff *skb, + struct vmxnet3_tx_ctx *ctx) +{ + struct tcphdr *tcph = (struct tcphdr *)skb_transport_header(skb); + if (ctx->ipv4) { + struct iphdr *iph = (struct iphdr *)skb_network_header(skb); + iph->check = 0; + tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, + IPPROTO_TCP, 0); + } else { + struct ipv6hdr *iph = (struct ipv6hdr *)skb_network_header(skb); + tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0, + IPPROTO_TCP, 0); + } +} + + +/* + * Transmits a pkt thru a given tq + * Returns: + * NETDEV_TX_OK: descriptors are set up successfully + * NETDEV_TX_OK: error occurred, the pkt is dropped + * NETDEV_TX_BUSY: tx ring is full, queue is stopped + * + * Side-effects: + * 1. tx ring may be changed + * 2. tq stats may be updated accordingly + * 3.
shared->txNumDeferred may be updated + */ + +static int +vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, + struct vmxnet3_adapter *adapter, struct net_device *netdev) +{ + int ret; + u32 count; + unsigned long flags; + struct vmxnet3_tx_ctx ctx; + union Vmxnet3_GenericDesc *gdesc; + + /* conservatively estimate # of descriptors to use */ + count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + + skb_shinfo(skb)->nr_frags + 1; + + ctx.ipv4 = (skb->protocol == __constant_ntohs(ETH_P_IP)); + + ctx.mss = skb_shinfo(skb)->gso_size; + if (ctx.mss) { + if (skb_header_cloned(skb)) { + if (unlikely(pskb_expand_head(skb, 0, 0, + GFP_ATOMIC) != 0)) { + tq->stats.drop_tso++; + goto drop_pkt; + } + tq->stats.copy_skb_header++; + } + vmxnet3_prepare_tso(skb, &ctx); + } else { + if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) { + + /* non-tso pkts must not use more than + * VMXNET3_MAX_TXD_PER_PKT entries + */ + if (skb_linearize(skb) != 0) { + tq->stats.drop_too_many_frags++; + goto drop_pkt; + } + tq->stats.linearized++; + + /* recalculate the # of descriptors to use */ + count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1; + } + } + + ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter); + if (ret >= 0) { + BUG_ON(ret <= 0 && ctx.copy_size != 0); + /* hdrs parsed, check against other limits */ + if (ctx.mss) { + if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size > + VMXNET3_MAX_TX_BUF_SIZE)) { + goto hdr_too_big; + } + } else { + if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (unlikely(ctx.eth_ip_hdr_size + + skb->csum_offset > + VMXNET3_MAX_CSUM_OFFSET)) { + goto hdr_too_big; + } + } + } + } else { + tq->stats.drop_hdr_inspect_err++; + goto drop_pkt; + } + + spin_lock_irqsave(&tq->tx_lock, flags); + + if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) { + tq->stats.tx_ring_full++; + dev_dbg(&adapter->netdev->dev, + "tx queue stopped on %s, next2comp %u" + " next2fill %u\n", adapter->netdev->name, + tq->tx_ring.next2comp, tq->tx_ring.next2fill); + + vmxnet3_tq_stop(tq, adapter); + spin_unlock_irqrestore(&tq->tx_lock, flags); + return NETDEV_TX_BUSY; + } + + /* fill tx descs related to addr & len */ + vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter); + + /* setup the EOP desc */ + ctx.eop_txd->dword[3] = VMXNET3_TXD_CQ | VMXNET3_TXD_EOP; + + /* setup the SOP desc */ + gdesc = ctx.sop_txd; + if (ctx.mss) { + gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size; + gdesc->txd.om = VMXNET3_OM_TSO; + gdesc->txd.msscof = ctx.mss; + tq->shared->txNumDeferred += (skb->len - gdesc->txd.hlen + + ctx.mss - 1) / ctx.mss; + } else { + if (skb->ip_summed == CHECKSUM_PARTIAL) { + gdesc->txd.hlen = ctx.eth_ip_hdr_size; + gdesc->txd.om = VMXNET3_OM_CSUM; + gdesc->txd.msscof = ctx.eth_ip_hdr_size + + skb->csum_offset; + } else { + gdesc->txd.om = 0; + gdesc->txd.msscof = 0; + } + tq->shared->txNumDeferred++; + } + + if (vlan_tx_tag_present(skb)) { + gdesc->txd.ti = 1; + gdesc->txd.tci = vlan_tx_tag_get(skb); + } + + wmb(); + + /* finally flips the GEN bit of the SOP desc */ + gdesc->dword[2] ^= VMXNET3_TXD_GEN; + dev_dbg(&adapter->netdev->dev, + "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n", + (u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd - + tq->tx_ring.base), gdesc->txd.addr, gdesc->dword[2], + gdesc->dword[3]); + + spin_unlock_irqrestore(&tq->tx_lock, flags); + + if (tq->shared->txNumDeferred >= tq->shared->txThreshold) { + tq->shared->txNumDeferred = 0; + VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD, + tq->tx_ring.next2fill); + } + netdev->trans_start = jiffies; + + return NETDEV_TX_OK; + 
+hdr_too_big: + tq->stats.drop_oversized_hdr++; +drop_pkt: + tq->stats.drop_total++; + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + + +static netdev_tx_t +vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + struct vmxnet3_tx_queue *tq = &adapter->tx_queue; + + return vmxnet3_tq_xmit(skb, tq, adapter, netdev); +} + + +static void +vmxnet3_rx_csum(struct vmxnet3_adapter *adapter, + struct sk_buff *skb, + union Vmxnet3_GenericDesc *gdesc) +{ + if (!gdesc->rcd.cnc && adapter->rxcsum) { + /* typical case: TCP/UDP over IP and both csums are correct */ + if ((gdesc->dword[3] & VMXNET3_RCD_CSUM_OK) == + VMXNET3_RCD_CSUM_OK) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp)); + BUG_ON(!(gdesc->rcd.v4 || gdesc->rcd.v6)); + BUG_ON(gdesc->rcd.frg); + } else { + if (gdesc->rcd.csum) { + skb->csum = htons(gdesc->rcd.csum); + skb->ip_summed = CHECKSUM_PARTIAL; + } else { + skb->ip_summed = CHECKSUM_NONE; + } + } + } else { + skb->ip_summed = CHECKSUM_NONE; + } +} + + +static void +vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd, + struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter) +{ + rq->stats.drop_err++; + if (!rcd->fcs) + rq->stats.drop_fcs++; + + rq->stats.drop_total++; + + /* + * We do not unmap and chain the rx buffer to the skb. + * We basically pretend this buffer is not used and will be recycled + * by vmxnet3_rq_alloc_rx_buf() + */ + + /* + * ctx->skb may be NULL if this is the first and the only one + * desc for the pkt + */ + if (ctx->skb) + dev_kfree_skb_irq(ctx->skb); + + ctx->skb = NULL; +} + + +static int +vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, + struct vmxnet3_adapter *adapter, int quota) +{ + static u32 rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2}; + u32 num_rxd = 0; + struct Vmxnet3_RxCompDesc *rcd; + struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx; + + rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd; + while (rcd->gen == rq->comp_ring.gen) { + struct vmxnet3_rx_buf_info *rbi; + struct sk_buff *skb; + int num_to_alloc; + struct Vmxnet3_RxDesc *rxd; + u32 idx, ring_idx; + + if (num_rxd >= quota) { + /* we may stop even before we see the EOP desc of + * the current pkt + */ + break; + } + num_rxd++; + + idx = rcd->rxdIdx; + ring_idx = rcd->rqID == rq->qid ? 0 : 1; + + rxd = &rq->rx_ring[ring_idx].base[idx].rxd; + rbi = rq->buf_info[ring_idx] + idx; + + BUG_ON(rxd->addr != rbi->dma_addr || rxd->len != rbi->len); + + if (unlikely(rcd->eop && rcd->err)) { + vmxnet3_rx_error(rq, rcd, ctx, adapter); + goto rcd_done; + } + + if (rcd->sop) { /* first buf of the pkt */ + BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD || + rcd->rqID != rq->qid); + + BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB); + BUG_ON(ctx->skb != NULL || rbi->skb == NULL); + + if (unlikely(rcd->len == 0)) { + /* Pretend the rx buffer is skipped. 
*/ + BUG_ON(!(rcd->sop && rcd->eop)); + dev_dbg(&adapter->netdev->dev, + "rxRing[%u][%u] 0 length\n", + ring_idx, idx); + goto rcd_done; + } + + ctx->skb = rbi->skb; + rbi->skb = NULL; + + pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len, + PCI_DMA_FROMDEVICE); + + skb_put(ctx->skb, rcd->len); + } else { + BUG_ON(ctx->skb == NULL); + /* non SOP buffer must be type 1 in most cases */ + if (rbi->buf_type == VMXNET3_RX_BUF_PAGE) { + BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY); + + if (rcd->len) { + pci_unmap_page(adapter->pdev, + rbi->dma_addr, rbi->len, + PCI_DMA_FROMDEVICE); + + vmxnet3_append_frag(ctx->skb, rcd, rbi); + rbi->page = NULL; + } + } else { + /* + * The only time a non-SOP buffer is type 0 is + * when it's EOP and error flag is raised, which + * has already been handled. + */ + BUG_ON(true); + } + } + + skb = ctx->skb; + if (rcd->eop) { + skb->len += skb->data_len; + skb->truesize += skb->data_len; + + vmxnet3_rx_csum(adapter, skb, + (union Vmxnet3_GenericDesc *)rcd); + skb->protocol = eth_type_trans(skb, adapter->netdev); + + if (unlikely(adapter->vlan_grp && rcd->ts)) { + vlan_hwaccel_receive_skb(skb, + adapter->vlan_grp, rcd->tci); + } else { + netif_receive_skb(skb); + } + + adapter->netdev->last_rx = jiffies; + ctx->skb = NULL; + } + +rcd_done: + /* device may skip some rx descs */ + rq->rx_ring[ring_idx].next2comp = idx; + VMXNET3_INC_RING_IDX_ONLY(rq->rx_ring[ring_idx].next2comp, + rq->rx_ring[ring_idx].size); + + /* refill rx buffers frequently to avoid starving the h/w */ + num_to_alloc = vmxnet3_cmd_ring_desc_avail(rq->rx_ring + + ring_idx); + if (unlikely(num_to_alloc > VMXNET3_RX_ALLOC_THRESHOLD(rq, + ring_idx, adapter))) { + vmxnet3_rq_alloc_rx_buf(rq, ring_idx, num_to_alloc, + adapter); + + /* if needed, update the register */ + if (unlikely(rq->shared->updateRxProd)) { + VMXNET3_WRITE_BAR0_REG(adapter, + rxprod_reg[ring_idx] + rq->qid * 8, + rq->rx_ring[ring_idx].next2fill); + rq->uncommitted[ring_idx] = 0; + } + } + + vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring); + rcd = &rq->comp_ring.base[rq->comp_ring.next2proc].rcd; + } + + return num_rxd; +} + + +static void +vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq, + struct vmxnet3_adapter *adapter) +{ + u32 i, ring_idx; + struct Vmxnet3_RxDesc *rxd; + + for (ring_idx = 0; ring_idx < 2; ring_idx++) { + for (i = 0; i < rq->rx_ring[ring_idx].size; i++) { + rxd = &rq->rx_ring[ring_idx].base[i].rxd; + + if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD && + rq->buf_info[ring_idx][i].skb) { + pci_unmap_single(adapter->pdev, rxd->addr, + rxd->len, PCI_DMA_FROMDEVICE); + dev_kfree_skb(rq->buf_info[ring_idx][i].skb); + rq->buf_info[ring_idx][i].skb = NULL; + } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY && + rq->buf_info[ring_idx][i].page) { + pci_unmap_page(adapter->pdev, rxd->addr, + rxd->len, PCI_DMA_FROMDEVICE); + put_page(rq->buf_info[ring_idx][i].page); + rq->buf_info[ring_idx][i].page = NULL; + } + } + + rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN; + rq->rx_ring[ring_idx].next2fill = + rq->rx_ring[ring_idx].next2comp = 0; + rq->uncommitted[ring_idx] = 0; + } + + rq->comp_ring.gen = VMXNET3_INIT_GEN; + rq->comp_ring.next2proc = 0; +} + + +void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, + struct vmxnet3_adapter *adapter) +{ + int i; + int j; + + /* all rx buffers must have already been freed */ + for (i = 0; i < 2; i++) { + if (rq->buf_info[i]) { + for (j = 0; j < rq->rx_ring[i].size; j++) + BUG_ON(rq->buf_info[i][j].page != NULL); + } + } + + + kfree(rq->buf_info[0]); + + for (i = 0; i < 2; i++) { + if 
(rq->rx_ring[i].base) { + pci_free_consistent(adapter->pdev, rq->rx_ring[i].size + * sizeof(struct Vmxnet3_RxDesc), + rq->rx_ring[i].base, + rq->rx_ring[i].basePA); + rq->rx_ring[i].base = NULL; + } + rq->buf_info[i] = NULL; + } + + if (rq->comp_ring.base) { + pci_free_consistent(adapter->pdev, rq->comp_ring.size * + sizeof(struct Vmxnet3_RxCompDesc), + rq->comp_ring.base, rq->comp_ring.basePA); + rq->comp_ring.base = NULL; + } +} + + +static int +vmxnet3_rq_init(struct vmxnet3_rx_queue *rq, + struct vmxnet3_adapter *adapter) +{ + int i; + + /* initialize buf_info */ + for (i = 0; i < rq->rx_ring[0].size; i++) { + + /* 1st buf for a pkt is skbuff */ + if (i % adapter->rx_buf_per_pkt == 0) { + rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB; + rq->buf_info[0][i].len = adapter->skb_buf_size; + } else { /* subsequent bufs for a pkt is frag */ + rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE; + rq->buf_info[0][i].len = PAGE_SIZE; + } + } + for (i = 0; i < rq->rx_ring[1].size; i++) { + rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE; + rq->buf_info[1][i].len = PAGE_SIZE; + } + + /* reset internal state and allocate buffers for both rings */ + for (i = 0; i < 2; i++) { + rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0; + rq->uncommitted[i] = 0; + + memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size * + sizeof(struct Vmxnet3_RxDesc)); + rq->rx_ring[i].gen = VMXNET3_INIT_GEN; + } + if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1, + adapter) == 0) { + /* at least has 1 rx buffer for the 1st ring */ + return -ENOMEM; + } + vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter); + + /* reset the comp ring */ + rq->comp_ring.next2proc = 0; + memset(rq->comp_ring.base, 0, rq->comp_ring.size * + sizeof(struct Vmxnet3_RxCompDesc)); + rq->comp_ring.gen = VMXNET3_INIT_GEN; + + /* reset rxctx */ + rq->rx_ctx.skb = NULL; + + /* stats are not reset */ + return 0; +} + + +static int +vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) +{ + int i; + size_t sz; + struct vmxnet3_rx_buf_info *bi; + + for (i = 0; i < 2; i++) { + + sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc); + rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz, + &rq->rx_ring[i].basePA); + if (!rq->rx_ring[i].base) { + printk(KERN_ERR "%s: failed to allocate rx ring %d\n", + adapter->netdev->name, i); + goto err; + } + } + + sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc); + rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz, + &rq->comp_ring.basePA); + if (!rq->comp_ring.base) { + printk(KERN_ERR "%s: failed to allocate rx comp ring\n", + adapter->netdev->name); + goto err; + } + + sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size + + rq->rx_ring[1].size); + bi = kmalloc(sz, GFP_KERNEL); + if (!bi) { + printk(KERN_ERR "%s: failed to allocate rx bufinfo\n", + adapter->netdev->name); + goto err; + } + memset(bi, 0, sz); + rq->buf_info[0] = bi; + rq->buf_info[1] = bi + rq->rx_ring[0].size; + + return 0; + +err: + vmxnet3_rq_destroy(rq, adapter); + return -ENOMEM; +} + + +static int +vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget) +{ + if (unlikely(adapter->shared->ecr)) + vmxnet3_process_events(adapter); + + vmxnet3_tq_tx_complete(&adapter->tx_queue, adapter); + return vmxnet3_rq_rx_complete(&adapter->rx_queue, adapter, budget); +} + + +static int +vmxnet3_poll(struct napi_struct *napi, int budget) +{ + struct vmxnet3_adapter *adapter = container_of(napi, + struct vmxnet3_adapter, napi); + int rxd_done; + + rxd_done = 
vmxnet3_do_poll(adapter, budget); + + if (rxd_done < budget) { + napi_complete(napi); + vmxnet3_enable_intr(adapter, 0); + } + return rxd_done; +} + + +/* Interrupt handler for vmxnet3 */ +static irqreturn_t +vmxnet3_intr(int irq, void *dev_id) +{ + struct net_device *dev = dev_id; + struct vmxnet3_adapter *adapter = netdev_priv(dev); + + if (unlikely(adapter->intr.type == VMXNET3_IT_INTX)) { + u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR); + if (unlikely(icr == 0)) + /* not ours */ + return IRQ_NONE; + } + + + /* disable intr if needed */ + if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) + vmxnet3_disable_intr(adapter, 0); + + napi_schedule(&adapter->napi); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER + + +/* netpoll callback. */ +static void +vmxnet3_netpoll(struct net_device *netdev) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + int irq; + +#ifdef CONFIG_PCI_MSI + if (adapter->intr.type == VMXNET3_IT_MSIX) + irq = adapter->intr.msix_entries[0].vector; + else +#endif + irq = adapter->pdev->irq; + + disable_irq(irq); + vmxnet3_intr(irq, netdev); + enable_irq(irq); +} +#endif + +static int +vmxnet3_request_irqs(struct vmxnet3_adapter *adapter) +{ + int err; + +#ifdef CONFIG_PCI_MSI + if (adapter->intr.type == VMXNET3_IT_MSIX) { + /* we only use 1 MSI-X vector */ + err = request_irq(adapter->intr.msix_entries[0].vector, + vmxnet3_intr, 0, adapter->netdev->name, + adapter->netdev); + } else +#endif + if (adapter->intr.type == VMXNET3_IT_MSI) { + err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0, + adapter->netdev->name, adapter->netdev); + } else { + err = request_irq(adapter->pdev->irq, vmxnet3_intr, + IRQF_SHARED, adapter->netdev->name, + adapter->netdev); + } + + if (err) + printk(KERN_ERR "Failed to request irq %s (intr type:%d), error" + ":%d\n", adapter->netdev->name, adapter->intr.type, err); + + + if (!err) { + int i; + /* init our intr settings */ + for (i = 0; i < adapter->intr.num_intrs; i++) + adapter->intr.mod_levels[i] = UPT1_IML_ADAPTIVE; + + /* next setup intr index for all intr sources */ + adapter->tx_queue.comp_ring.intr_idx = 0; + adapter->rx_queue.comp_ring.intr_idx = 0; + adapter->intr.event_intr_idx = 0; + + printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors " + "allocated\n", adapter->netdev->name, adapter->intr.type, + adapter->intr.mask_mode, adapter->intr.num_intrs); + } + + return err; +} + + +static void +vmxnet3_free_irqs(struct vmxnet3_adapter *adapter) +{ + BUG_ON(adapter->intr.type == VMXNET3_IT_AUTO || + adapter->intr.num_intrs <= 0); + + switch (adapter->intr.type) { +#ifdef CONFIG_PCI_MSI + case VMXNET3_IT_MSIX: + { + int i; + + for (i = 0; i < adapter->intr.num_intrs; i++) + free_irq(adapter->intr.msix_entries[i].vector, + adapter->netdev); + break; + } +#endif + case VMXNET3_IT_MSI: + free_irq(adapter->pdev->irq, adapter->netdev); + break; + case VMXNET3_IT_INTX: + free_irq(adapter->pdev->irq, adapter->netdev); + break; + default: + BUG_ON(true); + } +} + + +static void +vmxnet3_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + struct Vmxnet3_DriverShared *shared = adapter->shared; + u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; + + if (grp) { + /* add vlan rx stripping. 
*/ + if (adapter->netdev->features & NETIF_F_HW_VLAN_RX) { + int i; + struct Vmxnet3_DSDevRead *devRead = &shared->devRead; + adapter->vlan_grp = grp; + + /* update FEATURES to device */ + devRead->misc.uptFeatures |= UPT1_F_RXVLAN; + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_UPDATE_FEATURE); + /* + * Clear entire vfTable; then enable untagged pkts. + * Note: setting one entry in vfTable to non-zero turns + * on VLAN rx filtering. + */ + for (i = 0; i < VMXNET3_VFT_SIZE; i++) + vfTable[i] = 0; + + VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0); + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_UPDATE_VLAN_FILTERS); + } else { + printk(KERN_ERR "%s: vlan_rx_register when device has " + "no NETIF_F_HW_VLAN_RX\n", netdev->name); + } + } else { + /* remove vlan rx stripping. */ + struct Vmxnet3_DSDevRead *devRead = &shared->devRead; + adapter->vlan_grp = NULL; + + if (devRead->misc.uptFeatures & UPT1_F_RXVLAN) { + int i; + + for (i = 0; i < VMXNET3_VFT_SIZE; i++) { + /* clear entire vfTable; this also disables + * VLAN rx filtering + */ + vfTable[i] = 0; + } + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_UPDATE_VLAN_FILTERS); + + /* update FEATURES to device */ + devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN; + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_UPDATE_FEATURE); + } + } +} + + +static void +vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter) +{ + if (adapter->vlan_grp) { + u16 vid; + u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; + bool activeVlan = false; + + for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { + if (vlan_group_get_device(adapter->vlan_grp, vid)) { + VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); + activeVlan = true; + } + } + if (activeVlan) { + /* continue to allow untagged pkts */ + VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0); + } + } +} + + +static void +vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; + + VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid); + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_UPDATE_VLAN_FILTERS); +} + + +static void +vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable; + + VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid); + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_UPDATE_VLAN_FILTERS); +} + + +static u8 * +vmxnet3_copy_mc(struct net_device *netdev) +{ + u8 *buf = NULL; + u32 sz = netdev->mc_count * ETH_ALEN; + + /* struct Vmxnet3_RxFilterConf.mfTableLen is u16. 
*/ + if (sz <= 0xffff) { + /* We may be called with BH disabled */ + buf = kmalloc(sz, GFP_ATOMIC); + if (buf) { + int i; + struct dev_mc_list *mc = netdev->mc_list; + + for (i = 0; i < netdev->mc_count; i++) { + BUG_ON(!mc); + memcpy(buf + i * ETH_ALEN, mc->dmi_addr, + ETH_ALEN); + mc = mc->next; + } + } + } + return buf; +} + + +static void +vmxnet3_set_mc(struct net_device *netdev) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + struct Vmxnet3_RxFilterConf *rxConf = + &adapter->shared->devRead.rxFilterConf; + u8 *new_table = NULL; + u32 new_mode = VMXNET3_RXM_UCAST; + + if (netdev->flags & IFF_PROMISC) + new_mode |= VMXNET3_RXM_PROMISC; + + if (netdev->flags & IFF_BROADCAST) + new_mode |= VMXNET3_RXM_BCAST; + + if (netdev->flags & IFF_ALLMULTI) + new_mode |= VMXNET3_RXM_ALL_MULTI; + else + if (netdev->mc_count > 0) { + new_table = vmxnet3_copy_mc(netdev); + if (new_table) { + new_mode |= VMXNET3_RXM_MCAST; + rxConf->mfTableLen = netdev->mc_count * + ETH_ALEN; + rxConf->mfTablePA = virt_to_phys(new_table); + } else { + printk(KERN_INFO "%s: failed to copy mcast list" + ", setting ALL_MULTI\n", netdev->name); + new_mode |= VMXNET3_RXM_ALL_MULTI; + } + } + + + if (!(new_mode & VMXNET3_RXM_MCAST)) { + rxConf->mfTableLen = 0; + rxConf->mfTablePA = 0; + } + + if (new_mode != rxConf->rxMode) { + rxConf->rxMode = new_mode; + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_UPDATE_RX_MODE); + } + + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_UPDATE_MAC_FILTERS); + + kfree(new_table); +} + + +/* + * Set up driver_shared based on settings in adapter. + */ + +static void +vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) +{ + struct Vmxnet3_DriverShared *shared = adapter->shared; + struct Vmxnet3_DSDevRead *devRead = &shared->devRead; + struct Vmxnet3_TxQueueConf *tqc; + struct Vmxnet3_RxQueueConf *rqc; + int i; + + memset(shared, 0, sizeof(*shared)); + + /* driver settings */ + shared->magic = VMXNET3_REV1_MAGIC; + devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM; + devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ? 
+ VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64); + devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX; + devRead->misc.driverInfo.vmxnet3RevSpt = 1; + devRead->misc.driverInfo.uptVerSpt = 1; + + devRead->misc.ddPA = virt_to_phys(adapter); + devRead->misc.ddLen = sizeof(struct vmxnet3_adapter); + + /* set up feature flags */ + if (adapter->rxcsum) + devRead->misc.uptFeatures |= UPT1_F_RXCSUM; + + if (adapter->lro) { + devRead->misc.uptFeatures |= UPT1_F_LRO; + devRead->misc.maxNumRxSG = 1 + MAX_SKB_FRAGS; + } + if ((adapter->netdev->features & NETIF_F_HW_VLAN_RX) + && adapter->vlan_grp) { + devRead->misc.uptFeatures |= UPT1_F_RXVLAN; + } + + devRead->misc.mtu = adapter->netdev->mtu; + devRead->misc.queueDescPA = adapter->queue_desc_pa; + devRead->misc.queueDescLen = sizeof(struct Vmxnet3_TxQueueDesc) + + sizeof(struct Vmxnet3_RxQueueDesc); + + /* tx queue settings */ + BUG_ON(adapter->tx_queue.tx_ring.base == NULL); + + devRead->misc.numTxQueues = 1; + tqc = &adapter->tqd_start->conf; + tqc->txRingBasePA = adapter->tx_queue.tx_ring.basePA; + tqc->dataRingBasePA = adapter->tx_queue.data_ring.basePA; + tqc->compRingBasePA = adapter->tx_queue.comp_ring.basePA; + tqc->ddPA = virt_to_phys(adapter->tx_queue.buf_info); + tqc->txRingSize = adapter->tx_queue.tx_ring.size; + tqc->dataRingSize = adapter->tx_queue.data_ring.size; + tqc->compRingSize = adapter->tx_queue.comp_ring.size; + tqc->ddLen = sizeof(struct vmxnet3_tx_buf_info) * + tqc->txRingSize; + tqc->intrIdx = adapter->tx_queue.comp_ring.intr_idx; + + /* rx queue settings */ + devRead->misc.numRxQueues = 1; + rqc = &adapter->rqd_start->conf; + rqc->rxRingBasePA[0] = adapter->rx_queue.rx_ring[0].basePA; + rqc->rxRingBasePA[1] = adapter->rx_queue.rx_ring[1].basePA; + rqc->compRingBasePA = adapter->rx_queue.comp_ring.basePA; + rqc->ddPA = virt_to_phys(adapter->rx_queue.buf_info); + rqc->rxRingSize[0] = adapter->rx_queue.rx_ring[0].size; + rqc->rxRingSize[1] = adapter->rx_queue.rx_ring[1].size; + rqc->compRingSize = adapter->rx_queue.comp_ring.size; + rqc->ddLen = sizeof(struct vmxnet3_rx_buf_info) * + (rqc->rxRingSize[0] + rqc->rxRingSize[1]); + rqc->intrIdx = adapter->rx_queue.comp_ring.intr_idx; + + /* intr settings */ + devRead->intrConf.autoMask = adapter->intr.mask_mode == + VMXNET3_IMM_AUTO; + devRead->intrConf.numIntrs = adapter->intr.num_intrs; + for (i = 0; i < adapter->intr.num_intrs; i++) + devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i]; + + devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx; + + /* rx filter settings */ + devRead->rxFilterConf.rxMode = 0; + vmxnet3_restore_vlan(adapter); + /* the rest are already zeroed */ +} + + +int +vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) +{ + int err; + u32 ret; + + dev_dbg(&adapter->netdev->dev, + "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes" + " %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size, + adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size, + adapter->rx_queue.rx_ring[0].size, + adapter->rx_queue.rx_ring[1].size); + + vmxnet3_tq_init(&adapter->tx_queue, adapter); + err = vmxnet3_rq_init(&adapter->rx_queue, adapter); + if (err) { + printk(KERN_ERR "Failed to init rx queue for %s: error %d\n", + adapter->netdev->name, err); + goto rq_err; + } + + err = vmxnet3_request_irqs(adapter); + if (err) { + printk(KERN_ERR "Failed to setup irq for %s: error %d\n", + adapter->netdev->name, err); + goto irq_err; + } + + vmxnet3_setup_driver_shared(adapter); + + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, + 
VMXNET3_GET_ADDR_LO(adapter->shared_pa)); + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, + VMXNET3_GET_ADDR_HI(adapter->shared_pa)); + + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_ACTIVATE_DEV); + ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); + + if (ret != 0) { + printk(KERN_ERR "Failed to activate dev %s: error %u\n", + adapter->netdev->name, ret); + err = -EINVAL; + goto activate_err; + } + VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD, + adapter->rx_queue.rx_ring[0].next2fill); + VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD2, + adapter->rx_queue.rx_ring[1].next2fill); + + /* Apply the rx filter settings last. */ + vmxnet3_set_mc(adapter->netdev); + + /* + * Check link state when first activating device. It will start the + * tx queue if the link is up. + */ + vmxnet3_check_link(adapter); + + napi_enable(&adapter->napi); + vmxnet3_enable_all_intrs(adapter); + clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); + return 0; + +activate_err: + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0); + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0); + vmxnet3_free_irqs(adapter); +irq_err: +rq_err: + /* free up buffers we allocated */ + vmxnet3_rq_cleanup(&adapter->rx_queue, adapter); + return err; +} + + +void +vmxnet3_reset_dev(struct vmxnet3_adapter *adapter) +{ + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV); +} + + +int +vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter) +{ + if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)) + return 0; + + + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_QUIESCE_DEV); + vmxnet3_disable_all_intrs(adapter); + + napi_disable(&adapter->napi); + netif_tx_disable(adapter->netdev); + adapter->link_speed = 0; + netif_carrier_off(adapter->netdev); + + vmxnet3_tq_cleanup(&adapter->tx_queue, adapter); + vmxnet3_rq_cleanup(&adapter->rx_queue, adapter); + vmxnet3_free_irqs(adapter); + return 0; +} + + +static void +vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac) +{ + u32 tmp; + + tmp = *(u32 *)mac; + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp); + + tmp = (mac[5] << 8) | mac[4]; + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp); +} + + +static int +vmxnet3_set_mac_addr(struct net_device *netdev, void *p) +{ + struct sockaddr *addr = p; + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + vmxnet3_write_mac_addr(adapter, addr->sa_data); + + return 0; +} + + +/* ==================== initialization and cleanup routines ============ */ + +static int +vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64) +{ + int err; + unsigned long mmio_start, mmio_len; + struct pci_dev *pdev = adapter->pdev; + + err = pci_enable_device(pdev); + if (err) { + printk(KERN_ERR "Failed to enable adapter %s: error %d\n", + pci_name(pdev), err); + return err; + } + + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) { + if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { + printk(KERN_ERR "pci_set_consistent_dma_mask failed " + "for adapter %s\n", pci_name(pdev)); + err = -EIO; + goto err_set_mask; + } + *dma64 = true; + } else { + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { + printk(KERN_ERR "pci_set_dma_mask failed for adapter " + "%s\n", pci_name(pdev)); + err = -EIO; + goto err_set_mask; + } + *dma64 = false; + } + + err = pci_request_selected_regions(pdev, (1 << 2) - 1, + vmxnet3_driver_name); + if (err) { + printk(KERN_ERR "Failed to 
request region for adapter %s: " + "error %d\n", pci_name(pdev), err); + goto err_set_mask; + } + + pci_set_master(pdev); + + mmio_start = pci_resource_start(pdev, 0); + mmio_len = pci_resource_len(pdev, 0); + adapter->hw_addr0 = ioremap(mmio_start, mmio_len); + if (!adapter->hw_addr0) { + printk(KERN_ERR "Failed to map bar0 for adapter %s\n", + pci_name(pdev)); + err = -EIO; + goto err_ioremap; + } + + mmio_start = pci_resource_start(pdev, 1); + mmio_len = pci_resource_len(pdev, 1); + adapter->hw_addr1 = ioremap(mmio_start, mmio_len); + if (!adapter->hw_addr1) { + printk(KERN_ERR "Failed to map bar1 for adapter %s\n", + pci_name(pdev)); + err = -EIO; + goto err_bar1; + } + return 0; + +err_bar1: + iounmap(adapter->hw_addr0); +err_ioremap: + pci_release_selected_regions(pdev, (1 << 2) - 1); +err_set_mask: + pci_disable_device(pdev); + return err; +} + + +static void +vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter) +{ + BUG_ON(!adapter->pdev); + + iounmap(adapter->hw_addr0); + iounmap(adapter->hw_addr1); + pci_release_selected_regions(adapter->pdev, (1 << 2) - 1); + pci_disable_device(adapter->pdev); +} + + +static void +vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter) +{ + size_t sz; + + if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE - + VMXNET3_MAX_ETH_HDR_SIZE) { + adapter->skb_buf_size = adapter->netdev->mtu + + VMXNET3_MAX_ETH_HDR_SIZE; + if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE) + adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE; + + adapter->rx_buf_per_pkt = 1; + } else { + adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE; + sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE + + VMXNET3_MAX_ETH_HDR_SIZE; + adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE; + } + + /* + * for simplicity, force the ring0 size to be a multiple of + * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN + */ + sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN; + adapter->rx_queue.rx_ring[0].size = (adapter->rx_queue.rx_ring[0].size + + sz - 1) / sz * sz; + adapter->rx_queue.rx_ring[0].size = min_t(u32, + adapter->rx_queue.rx_ring[0].size, + VMXNET3_RX_RING_MAX_SIZE / sz * sz); +} + + +int +vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size, + u32 rx_ring_size, u32 rx_ring2_size) +{ + int err; + + adapter->tx_queue.tx_ring.size = tx_ring_size; + adapter->tx_queue.data_ring.size = tx_ring_size; + adapter->tx_queue.comp_ring.size = tx_ring_size; + adapter->tx_queue.shared = &adapter->tqd_start->ctrl; + adapter->tx_queue.stopped = true; + err = vmxnet3_tq_create(&adapter->tx_queue, adapter); + if (err) + return err; + + adapter->rx_queue.rx_ring[0].size = rx_ring_size; + adapter->rx_queue.rx_ring[1].size = rx_ring2_size; + vmxnet3_adjust_rx_ring_size(adapter); + adapter->rx_queue.comp_ring.size = adapter->rx_queue.rx_ring[0].size + + adapter->rx_queue.rx_ring[1].size; + adapter->rx_queue.qid = 0; + adapter->rx_queue.qid2 = 1; + adapter->rx_queue.shared = &adapter->rqd_start->ctrl; + err = vmxnet3_rq_create(&adapter->rx_queue, adapter); + if (err) + vmxnet3_tq_destroy(&adapter->tx_queue, adapter); + + return err; +} + +static int +vmxnet3_open(struct net_device *netdev) +{ + struct vmxnet3_adapter *adapter; + int err; + + adapter = netdev_priv(netdev); + + spin_lock_init(&adapter->tx_queue.tx_lock); + + err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE, + VMXNET3_DEF_RX_RING_SIZE, + VMXNET3_DEF_RX_RING_SIZE); + if (err) + goto queue_err; + + err = vmxnet3_activate_dev(adapter); + if (err) + goto activate_err; + + return 0; + 
+activate_err: + vmxnet3_rq_destroy(&adapter->rx_queue, adapter); + vmxnet3_tq_destroy(&adapter->tx_queue, adapter); +queue_err: + return err; +} + + +static int +vmxnet3_close(struct net_device *netdev) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + + /* + * Reset_work may be in the middle of resetting the device, wait for its + * completion. + */ + while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) + msleep(1); + + vmxnet3_quiesce_dev(adapter); + + vmxnet3_rq_destroy(&adapter->rx_queue, adapter); + vmxnet3_tq_destroy(&adapter->tx_queue, adapter); + + clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); + + + return 0; +} + + +void +vmxnet3_force_close(struct vmxnet3_adapter *adapter) +{ + /* + * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise + * vmxnet3_close() will deadlock. + */ + BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)); + + /* we need to enable NAPI, otherwise dev_close will deadlock */ + napi_enable(&adapter->napi); + dev_close(adapter->netdev); +} + + +static int +vmxnet3_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + int err = 0; + + if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU) + return -EINVAL; + + if (new_mtu > 1500 && !adapter->jumbo_frame) + return -EINVAL; + + netdev->mtu = new_mtu; + + /* + * Reset_work may be in the middle of resetting the device, wait for its + * completion. + */ + while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) + msleep(1); + + if (netif_running(netdev)) { + vmxnet3_quiesce_dev(adapter); + vmxnet3_reset_dev(adapter); + + /* we need to re-create the rx queue based on the new mtu */ + vmxnet3_rq_destroy(&adapter->rx_queue, adapter); + vmxnet3_adjust_rx_ring_size(adapter); + adapter->rx_queue.comp_ring.size = + adapter->rx_queue.rx_ring[0].size + + adapter->rx_queue.rx_ring[1].size; + err = vmxnet3_rq_create(&adapter->rx_queue, adapter); + if (err) { + printk(KERN_ERR "%s: failed to re-create rx queue," + " error %d. Closing it.\n", netdev->name, err); + goto out; + } + + err = vmxnet3_activate_dev(adapter); + if (err) { + printk(KERN_ERR "%s: failed to re-activate, error %d. 
" + "Closing it\n", netdev->name, err); + goto out; + } + } + +out: + clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); + if (err) + vmxnet3_force_close(adapter); + + return err; +} + + +static void +vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64) +{ + struct net_device *netdev = adapter->netdev; + + netdev->features = NETIF_F_SG | + NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_TX | + NETIF_F_HW_VLAN_RX | + NETIF_F_HW_VLAN_FILTER | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_LRO; + + printk(KERN_INFO "features: sg csum vlan jf tso tsoIPv6 lro"); + + adapter->rxcsum = true; + adapter->jumbo_frame = true; + adapter->lro = true; + + if (dma64) { + netdev->features |= NETIF_F_HIGHDMA; + printk(" highDMA"); + } + + netdev->vlan_features = netdev->features; + printk("\n"); +} + + +static void +vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac) +{ + u32 tmp; + + tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL); + *(u32 *)mac = tmp; + + tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH); + mac[4] = tmp & 0xff; + mac[5] = (tmp >> 8) & 0xff; +} + + +static void +vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) +{ + u32 cfg; + + /* intr settings */ + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_GET_CONF_INTR); + cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); + adapter->intr.type = cfg & 0x3; + adapter->intr.mask_mode = (cfg >> 2) & 0x3; + + if (adapter->intr.type == VMXNET3_IT_AUTO) { + int err; + +#ifdef CONFIG_PCI_MSI + adapter->intr.msix_entries[0].entry = 0; + err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries, + VMXNET3_LINUX_MAX_MSIX_VECT); + if (!err) { + adapter->intr.num_intrs = 1; + adapter->intr.type = VMXNET3_IT_MSIX; + return; + } +#endif + + err = pci_enable_msi(adapter->pdev); + if (!err) { + adapter->intr.num_intrs = 1; + adapter->intr.type = VMXNET3_IT_MSI; + return; + } + } + + adapter->intr.type = VMXNET3_IT_INTX; + + /* INT-X related setting */ + adapter->intr.num_intrs = 1; +} + + +static void +vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter) +{ + if (adapter->intr.type == VMXNET3_IT_MSIX) + pci_disable_msix(adapter->pdev); + else if (adapter->intr.type == VMXNET3_IT_MSI) + pci_disable_msi(adapter->pdev); + else + BUG_ON(adapter->intr.type != VMXNET3_IT_INTX); +} + + +static void +vmxnet3_tx_timeout(struct net_device *netdev) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + adapter->tx_timeout_count++; + + printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name); + schedule_work(&adapter->work); +} + + +static void +vmxnet3_reset_work(struct work_struct *data) +{ + struct vmxnet3_adapter *adapter; + + adapter = container_of(data, struct vmxnet3_adapter, work); + + /* if another thread is resetting the device, no need to proceed */ + if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) + return; + + /* if the device is closed, we must leave it alone */ + if (netif_running(adapter->netdev)) { + printk(KERN_INFO "%s: resetting\n", adapter->netdev->name); + vmxnet3_quiesce_dev(adapter); + vmxnet3_reset_dev(adapter); + vmxnet3_activate_dev(adapter); + } else { + printk(KERN_INFO "%s: already closed\n", adapter->netdev->name); + } + + clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); +} + + +static int __devinit +vmxnet3_probe_device(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + static const struct net_device_ops vmxnet3_netdev_ops = { + .ndo_open = vmxnet3_open, + .ndo_stop = vmxnet3_close, + .ndo_start_xmit = vmxnet3_xmit_frame, + 
.ndo_set_mac_address = vmxnet3_set_mac_addr, + .ndo_change_mtu = vmxnet3_change_mtu, + .ndo_get_stats = vmxnet3_get_stats, + .ndo_tx_timeout = vmxnet3_tx_timeout, + .ndo_set_multicast_list = vmxnet3_set_mc, + .ndo_vlan_rx_register = vmxnet3_vlan_rx_register, + .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = vmxnet3_netpoll, +#endif + }; + int err; + bool dma64 = false; /* stupid gcc */ + u32 ver; + struct net_device *netdev; + struct vmxnet3_adapter *adapter; + u8 mac[ETH_ALEN]; + + netdev = alloc_etherdev(sizeof(struct vmxnet3_adapter)); + if (!netdev) { + printk(KERN_ERR "Failed to alloc ethernet device for adapter " + "%s\n", pci_name(pdev)); + return -ENOMEM; + } + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + + adapter->shared = pci_alloc_consistent(adapter->pdev, + sizeof(struct Vmxnet3_DriverShared), + &adapter->shared_pa); + if (!adapter->shared) { + printk(KERN_ERR "Failed to allocate memory for %s\n", + pci_name(pdev)); + err = -ENOMEM; + goto err_alloc_shared; + } + + adapter->tqd_start = pci_alloc_consistent(adapter->pdev, + sizeof(struct Vmxnet3_TxQueueDesc) + + sizeof(struct Vmxnet3_RxQueueDesc), + &adapter->queue_desc_pa); + + if (!adapter->tqd_start) { + printk(KERN_ERR "Failed to allocate memory for %s\n", + pci_name(pdev)); + err = -ENOMEM; + goto err_alloc_queue_desc; + } + adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start + + 1); + + adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL); + if (adapter->pm_conf == NULL) { + printk(KERN_ERR "Failed to allocate memory for %s\n", + pci_name(pdev)); + err = -ENOMEM; + goto err_alloc_pm; + } + + err = vmxnet3_alloc_pci_resources(adapter, &dma64); + if (err < 0) + goto err_alloc_pci; + + ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS); + if (ver & 1) { + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1); + } else { + printk(KERN_ERR "Incompatible h/w version (0x%x) for adapter" + " %s\n", ver, pci_name(pdev)); + err = -EBUSY; + goto err_ver; + } + + ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS); + if (ver & 1) { + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1); + } else { + printk(KERN_ERR "Incompatible upt version (0x%x) for " + "adapter %s\n", ver, pci_name(pdev)); + err = -EBUSY; + goto err_ver; + } + + vmxnet3_declare_features(adapter, dma64); + + adapter->dev_number = atomic_read(&devices_found); + vmxnet3_alloc_intr_resources(adapter); + + vmxnet3_read_mac_addr(adapter, mac); + memcpy(netdev->dev_addr, mac, netdev->addr_len); + + netdev->netdev_ops = &vmxnet3_netdev_ops; + netdev->watchdog_timeo = 5 * HZ; + vmxnet3_set_ethtool_ops(netdev); + + INIT_WORK(&adapter->work, vmxnet3_reset_work); + + netif_napi_add(netdev, &adapter->napi, vmxnet3_poll, 64); + SET_NETDEV_DEV(netdev, &pdev->dev); + err = register_netdev(netdev); + + if (err) { + printk(KERN_ERR "Failed to register adapter %s\n", + pci_name(pdev)); + goto err_register; + } + + set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); + atomic_inc(&devices_found); + return 0; + +err_register: + vmxnet3_free_intr_resources(adapter); +err_ver: + vmxnet3_free_pci_resources(adapter); +err_alloc_pci: + kfree(adapter->pm_conf); +err_alloc_pm: + pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) + + sizeof(struct Vmxnet3_RxQueueDesc), + adapter->tqd_start, adapter->queue_desc_pa); +err_alloc_queue_desc: + 
pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared), + adapter->shared, adapter->shared_pa); +err_alloc_shared: + pci_set_drvdata(pdev, NULL); + free_netdev(netdev); + return err; +} + + +static void __devexit +vmxnet3_remove_device(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + + flush_scheduled_work(); + + unregister_netdev(netdev); + + vmxnet3_free_intr_resources(adapter); + vmxnet3_free_pci_resources(adapter); + kfree(adapter->pm_conf); + pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) + + sizeof(struct Vmxnet3_RxQueueDesc), + adapter->tqd_start, adapter->queue_desc_pa); + pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared), + adapter->shared, adapter->shared_pa); + free_netdev(netdev); +} + + +#ifdef CONFIG_PM + +static int +vmxnet3_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *netdev = pci_get_drvdata(pdev); + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + struct Vmxnet3_PMConf *pmConf; + struct ethhdr *ehdr; + struct arphdr *ahdr; + u8 *arpreq; + struct in_device *in_dev; + struct in_ifaddr *ifa; + int i = 0; + + if (!netif_running(netdev)) + return 0; + + vmxnet3_disable_all_intrs(adapter); + vmxnet3_free_irqs(adapter); + vmxnet3_free_intr_resources(adapter); + + netif_device_detach(netdev); + netif_stop_queue(netdev); + + /* Create wake-up filters. */ + pmConf = adapter->pm_conf; + memset(pmConf, 0, sizeof(*pmConf)); + + if (adapter->wol & WAKE_UCAST) { + pmConf->filters[i].patternSize = ETH_ALEN; + pmConf->filters[i].maskSize = 1; + memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN); + pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */ + + pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER; + i++; + } + + if (adapter->wol & WAKE_ARP) { + in_dev = in_dev_get(netdev); + if (!in_dev) + goto skip_arp; + + ifa = (struct in_ifaddr *)in_dev->ifa_list; + if (!ifa) + goto skip_arp; + + pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header*/ + sizeof(struct arphdr) + /* ARP header */ + 2 * ETH_ALEN + /* 2 Ethernet addresses*/ + 2 * sizeof(u32); /*2 IPv4 addresses */ + pmConf->filters[i].maskSize = + (pmConf->filters[i].patternSize - 1) / 8 + 1; + + /* ETH_P_ARP in Ethernet header. */ + ehdr = (struct ethhdr *)pmConf->filters[i].pattern; + ehdr->h_proto = htons(ETH_P_ARP); + + /* ARPOP_REQUEST in ARP header. */ + ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN]; + ahdr->ar_op = htons(ARPOP_REQUEST); + arpreq = (u8 *)(ahdr + 1); + + /* The Unicast IPv4 address in 'tip' field. */ + arpreq += 2 * ETH_ALEN + sizeof(u32); + *(u32 *)arpreq = ifa->ifa_address; + + /* The mask for the relevant bits. 
*/ + pmConf->filters[i].mask[0] = 0x00; + pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */ + pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */ + pmConf->filters[i].mask[3] = 0x00; + pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */ + pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */ + in_dev_put(in_dev); + + pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER; + i++; + } + +skip_arp: + if (adapter->wol & WAKE_MAGIC) + pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC; + + pmConf->numFilters = i; + + adapter->shared->devRead.pmConfDesc.confVer = 1; + adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf); + adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf); + + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_UPDATE_PMCFG); + + pci_save_state(pdev); + pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND), + adapter->wol); + pci_disable_device(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND)); + + return 0; +} + + +static int +vmxnet3_resume(struct device *device) +{ + int err; + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *netdev = pci_get_drvdata(pdev); + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + struct Vmxnet3_PMConf *pmConf; + + if (!netif_running(netdev)) + return 0; + + /* Destroy wake-up filters. */ + pmConf = adapter->pm_conf; + memset(pmConf, 0, sizeof(*pmConf)); + + adapter->shared->devRead.pmConfDesc.confVer = 1; + adapter->shared->devRead.pmConfDesc.confLen = sizeof(*pmConf); + adapter->shared->devRead.pmConfDesc.confPA = virt_to_phys(pmConf); + + netif_device_attach(netdev); + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + err = pci_enable_device_mem(pdev); + if (err != 0) + return err; + + pci_enable_wake(pdev, PCI_D0, 0); + + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_UPDATE_PMCFG); + vmxnet3_alloc_intr_resources(adapter); + vmxnet3_request_irqs(adapter); + vmxnet3_enable_all_intrs(adapter); + + return 0; +} + +static struct dev_pm_ops vmxnet3_pm_ops = { + .suspend = vmxnet3_suspend, + .resume = vmxnet3_resume, +}; +#endif + +static struct pci_driver vmxnet3_driver = { + .name = vmxnet3_driver_name, + .id_table = vmxnet3_pciid_table, + .probe = vmxnet3_probe_device, + .remove = __devexit_p(vmxnet3_remove_device), +#ifdef CONFIG_PM + .driver.pm = &vmxnet3_pm_ops, +#endif +}; + + +static int __init +vmxnet3_init_module(void) +{ + printk(KERN_INFO "%s - version %s\n", VMXNET3_DRIVER_DESC, + VMXNET3_DRIVER_VERSION_REPORT); + return pci_register_driver(&vmxnet3_driver); +} + +module_init(vmxnet3_init_module); + + +static void +vmxnet3_exit_module(void) +{ + pci_unregister_driver(&vmxnet3_driver); +} + +module_exit(vmxnet3_exit_module); + +MODULE_AUTHOR("VMware, Inc."); +MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC); +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING); diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c new file mode 100644 index 00000000000..c2c15e4cafc --- /dev/null +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -0,0 +1,566 @@ +/* + * Linux driver for VMware's vmxnet3 ethernet NIC. + * + * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2 of the License and no later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> + * + */ + + +#include "vmxnet3_int.h" + +struct vmxnet3_stat_desc { + char desc[ETH_GSTRING_LEN]; + int offset; +}; + + +static u32 +vmxnet3_get_rx_csum(struct net_device *netdev) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + return adapter->rxcsum; +} + + +static int +vmxnet3_set_rx_csum(struct net_device *netdev, u32 val) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + + if (adapter->rxcsum != val) { + adapter->rxcsum = val; + if (netif_running(netdev)) { + if (val) + adapter->shared->devRead.misc.uptFeatures |= + UPT1_F_RXCSUM; + else + adapter->shared->devRead.misc.uptFeatures &= + ~UPT1_F_RXCSUM; + + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_UPDATE_FEATURE); + } + } + return 0; +} + + +/* per tq stats maintained by the device */ +static const struct vmxnet3_stat_desc +vmxnet3_tq_dev_stats[] = { + /* description, offset */ + { "TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) }, + { "TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) }, + { "ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) }, + { "ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) }, + { "mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) }, + { "mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) }, + { "bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) }, + { "bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) }, + { "pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) }, + { "pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) }, +}; + +/* per tq stats maintained by the driver */ +static const struct vmxnet3_stat_desc +vmxnet3_tq_driver_stats[] = { + /* description, offset */ + {"drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats, + drop_total) }, + { " too many frags", offsetof(struct vmxnet3_tq_driver_stats, + drop_too_many_frags) }, + { " giant hdr", offsetof(struct vmxnet3_tq_driver_stats, + drop_oversized_hdr) }, + { " hdr err", offsetof(struct vmxnet3_tq_driver_stats, + drop_hdr_inspect_err) }, + { " tso", offsetof(struct vmxnet3_tq_driver_stats, + drop_tso) }, + { "ring full", offsetof(struct vmxnet3_tq_driver_stats, + tx_ring_full) }, + { "pkts linearized", offsetof(struct vmxnet3_tq_driver_stats, + linearized) }, + { "hdr cloned", offsetof(struct vmxnet3_tq_driver_stats, + copy_skb_header) }, + { "giant hdr", offsetof(struct vmxnet3_tq_driver_stats, + oversized_hdr) }, +}; + +/* per rq stats maintained by the device */ +static const struct vmxnet3_stat_desc +vmxnet3_rq_dev_stats[] = { + { "LRO pkts rx", offsetof(struct UPT1_RxStats, LROPktsRxOK) }, + { "LRO byte rx", offsetof(struct UPT1_RxStats, LROBytesRxOK) }, + { "ucast pkts rx", offsetof(struct UPT1_RxStats, ucastPktsRxOK) }, + { "ucast bytes rx", offsetof(struct UPT1_RxStats, ucastBytesRxOK) }, + { "mcast pkts rx", 
offsetof(struct UPT1_RxStats, mcastPktsRxOK) }, + { "mcast bytes rx", offsetof(struct UPT1_RxStats, mcastBytesRxOK) }, + { "bcast pkts rx", offsetof(struct UPT1_RxStats, bcastPktsRxOK) }, + { "bcast bytes rx", offsetof(struct UPT1_RxStats, bcastBytesRxOK) }, + { "pkts rx out of buf", offsetof(struct UPT1_RxStats, pktsRxOutOfBuf) }, + { "pkts rx err", offsetof(struct UPT1_RxStats, pktsRxError) }, +}; + +/* per rq stats maintained by the driver */ +static const struct vmxnet3_stat_desc +vmxnet3_rq_driver_stats[] = { + /* description, offset */ + { "drv dropped rx total", offsetof(struct vmxnet3_rq_driver_stats, + drop_total) }, + { " err", offsetof(struct vmxnet3_rq_driver_stats, + drop_err) }, + { " fcs", offsetof(struct vmxnet3_rq_driver_stats, + drop_fcs) }, + { "rx buf alloc fail", offsetof(struct vmxnet3_rq_driver_stats, + rx_buf_alloc_failure) }, +}; + +/* global stats maintained by the driver */ +static const struct vmxnet3_stat_desc +vmxnet3_global_stats[] = { + /* description, offset */ + { "tx timeout count", offsetof(struct vmxnet3_adapter, + tx_timeout_count) } +}; + + +struct net_device_stats * +vmxnet3_get_stats(struct net_device *netdev) +{ + struct vmxnet3_adapter *adapter; + struct vmxnet3_tq_driver_stats *drvTxStats; + struct vmxnet3_rq_driver_stats *drvRxStats; + struct UPT1_TxStats *devTxStats; + struct UPT1_RxStats *devRxStats; + struct net_device_stats *net_stats = &netdev->stats; + + adapter = netdev_priv(netdev); + + /* Collect the dev stats into the shared area */ + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); + + /* Assuming that we have a single queue device */ + devTxStats = &adapter->tqd_start->stats; + devRxStats = &adapter->rqd_start->stats; + + /* Get access to the driver stats per queue */ + drvTxStats = &adapter->tx_queue.stats; + drvRxStats = &adapter->rx_queue.stats; + + memset(net_stats, 0, sizeof(*net_stats)); + + net_stats->rx_packets = devRxStats->ucastPktsRxOK + + devRxStats->mcastPktsRxOK + + devRxStats->bcastPktsRxOK; + + net_stats->tx_packets = devTxStats->ucastPktsTxOK + + devTxStats->mcastPktsTxOK + + devTxStats->bcastPktsTxOK; + + net_stats->rx_bytes = devRxStats->ucastBytesRxOK + + devRxStats->mcastBytesRxOK + + devRxStats->bcastBytesRxOK; + + net_stats->tx_bytes = devTxStats->ucastBytesTxOK + + devTxStats->mcastBytesTxOK + + devTxStats->bcastBytesTxOK; + + net_stats->rx_errors = devRxStats->pktsRxError; + net_stats->tx_errors = devTxStats->pktsTxError; + net_stats->rx_dropped = drvRxStats->drop_total; + net_stats->tx_dropped = drvTxStats->drop_total; + net_stats->multicast = devRxStats->mcastPktsRxOK; + + return net_stats; +} + +static int +vmxnet3_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(vmxnet3_tq_dev_stats) + + ARRAY_SIZE(vmxnet3_tq_driver_stats) + + ARRAY_SIZE(vmxnet3_rq_dev_stats) + + ARRAY_SIZE(vmxnet3_rq_driver_stats) + + ARRAY_SIZE(vmxnet3_global_stats); + default: + return -EOPNOTSUPP; + } +} + + +static int +vmxnet3_get_regs_len(struct net_device *netdev) +{ + return 20 * sizeof(u32); +} + + +static void +vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver)); + drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0'; + + strlcpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT, + sizeof(drvinfo->version)); + drvinfo->version[sizeof(drvinfo->version) - 1] = '\0'; + + 
strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); + drvinfo->fw_version[sizeof(drvinfo->fw_version) - 1] = '\0'; + + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + ETHTOOL_BUSINFO_LEN); + drvinfo->n_stats = vmxnet3_get_sset_count(netdev, ETH_SS_STATS); + drvinfo->testinfo_len = 0; + drvinfo->eedump_len = 0; + drvinfo->regdump_len = vmxnet3_get_regs_len(netdev); +} + + +static void +vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf) +{ + if (stringset == ETH_SS_STATS) { + int i; + + for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) { + memcpy(buf, vmxnet3_tq_dev_stats[i].desc, + ETH_GSTRING_LEN); + buf += ETH_GSTRING_LEN; + } + for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) { + memcpy(buf, vmxnet3_tq_driver_stats[i].desc, + ETH_GSTRING_LEN); + buf += ETH_GSTRING_LEN; + } + for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) { + memcpy(buf, vmxnet3_rq_dev_stats[i].desc, + ETH_GSTRING_LEN); + buf += ETH_GSTRING_LEN; + } + for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) { + memcpy(buf, vmxnet3_rq_driver_stats[i].desc, + ETH_GSTRING_LEN); + buf += ETH_GSTRING_LEN; + } + for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) { + memcpy(buf, vmxnet3_global_stats[i].desc, + ETH_GSTRING_LEN); + buf += ETH_GSTRING_LEN; + } + } +} + +static u32 +vmxnet3_get_flags(struct net_device *netdev) { + return netdev->features; +} + +static int +vmxnet3_set_flags(struct net_device *netdev, u32 data) { + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1; + u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1; + + if (lro_requested ^ lro_present) { + /* toggle the LRO feature */ + netdev->features ^= NETIF_F_LRO; + + /* update hardware LRO capability accordingly */ + if (lro_requested) + adapter->shared->devRead.misc.uptFeatures |= UPT1_F_LRO; + else + adapter->shared->devRead.misc.uptFeatures &= + ~UPT1_F_LRO; + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_UPDATE_FEATURE); + } + return 0; +} + +static void +vmxnet3_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *buf) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + u8 *base; + int i; + + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); + + /* this does assume each counter is 64-bit wide */ + + base = (u8 *)&adapter->tqd_start->stats; + for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) + *buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset); + + base = (u8 *)&adapter->tx_queue.stats; + for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) + *buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset); + + base = (u8 *)&adapter->rqd_start->stats; + for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) + *buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset); + + base = (u8 *)&adapter->rx_queue.stats; + for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) + *buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset); + + base = (u8 *)adapter; + for (i = 0; i < ARRAY_SIZE(vmxnet3_global_stats); i++) + *buf++ = *(u64 *)(base + vmxnet3_global_stats[i].offset); +} + + +static void +vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + u32 *buf = p; + + memset(p, 0, vmxnet3_get_regs_len(netdev)); + + regs->version = 1; + + /* Update vmxnet3_get_regs_len if we want to dump more registers */ + + /* make each ring use multiple of 16 bytes */ 
+ buf[0] = adapter->tx_queue.tx_ring.next2fill; + buf[1] = adapter->tx_queue.tx_ring.next2comp; + buf[2] = adapter->tx_queue.tx_ring.gen; + buf[3] = 0; + + buf[4] = adapter->tx_queue.comp_ring.next2proc; + buf[5] = adapter->tx_queue.comp_ring.gen; + buf[6] = adapter->tx_queue.stopped; + buf[7] = 0; + + buf[8] = adapter->rx_queue.rx_ring[0].next2fill; + buf[9] = adapter->rx_queue.rx_ring[0].next2comp; + buf[10] = adapter->rx_queue.rx_ring[0].gen; + buf[11] = 0; + + buf[12] = adapter->rx_queue.rx_ring[1].next2fill; + buf[13] = adapter->rx_queue.rx_ring[1].next2comp; + buf[14] = adapter->rx_queue.rx_ring[1].gen; + buf[15] = 0; + + buf[16] = adapter->rx_queue.comp_ring.next2proc; + buf[17] = adapter->rx_queue.comp_ring.gen; + buf[18] = 0; + buf[19] = 0; +} + + +static void +vmxnet3_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + + wol->supported = WAKE_UCAST | WAKE_ARP | WAKE_MAGIC; + wol->wolopts = adapter->wol; +} + + +static int +vmxnet3_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_PHY | WAKE_MCAST | WAKE_BCAST | + WAKE_MAGICSECURE)) { + return -EOPNOTSUPP; + } + + adapter->wol = wol->wolopts; + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + + +static int +vmxnet3_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + + ecmd->supported = SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full | + SUPPORTED_TP; + ecmd->advertising = ADVERTISED_TP; + ecmd->port = PORT_TP; + ecmd->transceiver = XCVR_INTERNAL; + + if (adapter->link_speed) { + ecmd->speed = adapter->link_speed; + ecmd->duplex = DUPLEX_FULL; + } else { + ecmd->speed = -1; + ecmd->duplex = -1; + } + return 0; +} + + +static void +vmxnet3_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *param) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + + param->rx_max_pending = VMXNET3_RX_RING_MAX_SIZE; + param->tx_max_pending = VMXNET3_TX_RING_MAX_SIZE; + param->rx_mini_max_pending = 0; + param->rx_jumbo_max_pending = 0; + + param->rx_pending = adapter->rx_queue.rx_ring[0].size; + param->tx_pending = adapter->tx_queue.tx_ring.size; + param->rx_mini_pending = 0; + param->rx_jumbo_pending = 0; +} + + +static int +vmxnet3_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *param) +{ + struct vmxnet3_adapter *adapter = netdev_priv(netdev); + u32 new_tx_ring_size, new_rx_ring_size; + u32 sz; + int err = 0; + + if (param->tx_pending == 0 || param->tx_pending > + VMXNET3_TX_RING_MAX_SIZE) + return -EINVAL; + + if (param->rx_pending == 0 || param->rx_pending > + VMXNET3_RX_RING_MAX_SIZE) + return -EINVAL; + + + /* round it up to a multiple of VMXNET3_RING_SIZE_ALIGN */ + new_tx_ring_size = (param->tx_pending + VMXNET3_RING_SIZE_MASK) & + ~VMXNET3_RING_SIZE_MASK; + new_tx_ring_size = min_t(u32, new_tx_ring_size, + VMXNET3_TX_RING_MAX_SIZE); + if (new_tx_ring_size > VMXNET3_TX_RING_MAX_SIZE || (new_tx_ring_size % + VMXNET3_RING_SIZE_ALIGN) != 0) + return -EINVAL; + + /* ring0 has to be a multiple of + * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN + */ + sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN; + new_rx_ring_size = (param->rx_pending + sz - 1) / sz * sz; + new_rx_ring_size = min_t(u32, new_rx_ring_size, + VMXNET3_RX_RING_MAX_SIZE / sz * sz); + if (new_rx_ring_size > VMXNET3_RX_RING_MAX_SIZE || 
(new_rx_ring_size % + sz) != 0) + return -EINVAL; + + if (new_tx_ring_size == adapter->tx_queue.tx_ring.size && + new_rx_ring_size == adapter->rx_queue.rx_ring[0].size) { + return 0; + } + + /* + * Reset_work may be in the middle of resetting the device, wait for its + * completion. + */ + while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)) + msleep(1); + + if (netif_running(netdev)) { + vmxnet3_quiesce_dev(adapter); + vmxnet3_reset_dev(adapter); + + /* recreate the rx queue and the tx queue based on the + * new sizes */ + vmxnet3_tq_destroy(&adapter->tx_queue, adapter); + vmxnet3_rq_destroy(&adapter->rx_queue, adapter); + + err = vmxnet3_create_queues(adapter, new_tx_ring_size, + new_rx_ring_size, VMXNET3_DEF_RX_RING_SIZE); + if (err) { + /* failed, most likely because of OOM, try default + * size */ + printk(KERN_ERR "%s: failed to apply new sizes, try the" + " default ones\n", netdev->name); + err = vmxnet3_create_queues(adapter, + VMXNET3_DEF_TX_RING_SIZE, + VMXNET3_DEF_RX_RING_SIZE, + VMXNET3_DEF_RX_RING_SIZE); + if (err) { + printk(KERN_ERR "%s: failed to create queues " + "with default sizes. Closing it\n", + netdev->name); + goto out; + } + } + + err = vmxnet3_activate_dev(adapter); + if (err) + printk(KERN_ERR "%s: failed to re-activate, error %d." + " Closing it\n", netdev->name, err); + } + +out: + clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); + if (err) + vmxnet3_force_close(adapter); + + return err; +} + + +static struct ethtool_ops vmxnet3_ethtool_ops = { + .get_settings = vmxnet3_get_settings, + .get_drvinfo = vmxnet3_get_drvinfo, + .get_regs_len = vmxnet3_get_regs_len, + .get_regs = vmxnet3_get_regs, + .get_wol = vmxnet3_get_wol, + .set_wol = vmxnet3_set_wol, + .get_link = ethtool_op_get_link, + .get_rx_csum = vmxnet3_get_rx_csum, + .set_rx_csum = vmxnet3_set_rx_csum, + .get_tx_csum = ethtool_op_get_tx_csum, + .set_tx_csum = ethtool_op_set_tx_hw_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, + .get_tso = ethtool_op_get_tso, + .set_tso = ethtool_op_set_tso, + .get_strings = vmxnet3_get_strings, + .get_flags = vmxnet3_get_flags, + .set_flags = vmxnet3_set_flags, + .get_sset_count = vmxnet3_get_sset_count, + .get_ethtool_stats = vmxnet3_get_ethtool_stats, + .get_ringparam = vmxnet3_get_ringparam, + .set_ringparam = vmxnet3_set_ringparam, +}; + +void vmxnet3_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &vmxnet3_ethtool_ops); +} diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h new file mode 100644 index 00000000000..3c0d70d5811 --- /dev/null +++ b/drivers/net/vmxnet3/vmxnet3_int.h @@ -0,0 +1,389 @@ +/* + * Linux driver for VMware's vmxnet3 ethernet NIC. + * + * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; version 2 of the License and no later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com> + * + */ + +#ifndef _VMXNET3_INT_H +#define _VMXNET3_INT_H + +#include <linux/types.h> +#include <linux/ethtool.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/netdevice.h> +#include <linux/pci.h> +#include <linux/ethtool.h> +#include <linux/compiler.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/ioport.h> +#include <linux/highmem.h> +#include <linux/init.h> +#include <linux/timer.h> +#include <linux/skbuff.h> +#include <linux/interrupt.h> +#include <linux/workqueue.h> +#include <linux/uaccess.h> +#include <asm/dma.h> +#include <asm/page.h> + +#include <linux/tcp.h> +#include <linux/udp.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/in.h> +#include <linux/etherdevice.h> +#include <asm/checksum.h> +#include <linux/if_vlan.h> +#include <linux/if_arp.h> +#include <linux/inetdevice.h> + +#include "vmxnet3_defs.h" + +#ifdef DEBUG +# define VMXNET3_DRIVER_VERSION_REPORT VMXNET3_DRIVER_VERSION_STRING"-NAPI(debug)" +#else +# define VMXNET3_DRIVER_VERSION_REPORT VMXNET3_DRIVER_VERSION_STRING"-NAPI" +#endif + + +/* + * Version numbers + */ +#define VMXNET3_DRIVER_VERSION_STRING "1.0.5.0-k" + +/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */ +#define VMXNET3_DRIVER_VERSION_NUM 0x01000500 + + +/* + * Capabilities + */ + +enum { + VMNET_CAP_SG = 0x0001, /* Can do scatter-gather transmits. */ + VMNET_CAP_IP4_CSUM = 0x0002, /* Can checksum only TCP/UDP over + * IPv4 */ + VMNET_CAP_HW_CSUM = 0x0004, /* Can checksum all packets. */ + VMNET_CAP_HIGH_DMA = 0x0008, /* Can DMA to high memory. */ + VMNET_CAP_TOE = 0x0010, /* Supports TCP/IP offload. */ + VMNET_CAP_TSO = 0x0020, /* Supports TCP Segmentation + * offload */ + VMNET_CAP_SW_TSO = 0x0040, /* Supports SW TCP Segmentation */ + VMNET_CAP_VMXNET_APROM = 0x0080, /* Vmxnet APROM support */ + VMNET_CAP_HW_TX_VLAN = 0x0100, /* Can we do VLAN tagging in HW */ + VMNET_CAP_HW_RX_VLAN = 0x0200, /* Can we do VLAN untagging in HW */ + VMNET_CAP_SW_VLAN = 0x0400, /* VLAN tagging/untagging in SW */ + VMNET_CAP_WAKE_PCKT_RCV = 0x0800, /* Can wake on network packet recv? */ + VMNET_CAP_ENABLE_INT_INLINE = 0x1000, /* Enable Interrupt Inline */ + VMNET_CAP_ENABLE_HEADER_COPY = 0x2000, /* copy header for vmkernel */ + VMNET_CAP_TX_CHAIN = 0x4000, /* Guest can use multiple tx entries + * for a pkt */ + VMNET_CAP_RX_CHAIN = 0x8000, /* pkt can span multiple rx entries */ + VMNET_CAP_LPD = 0x10000, /* large pkt delivery */ + VMNET_CAP_BPF = 0x20000, /* BPF Support in VMXNET Virtual HW*/ + VMNET_CAP_SG_SPAN_PAGES = 0x40000, /* Scatter-gather can span multiple*/ + /* pages transmits */ + VMNET_CAP_IP6_CSUM = 0x80000, /* Can do IPv6 csum offload. */ + VMNET_CAP_TSO6 = 0x100000, /* TSO seg. offload for IPv6 pkts. */ + VMNET_CAP_TSO256k = 0x200000, /* Can do TSO seg offload for */ + /* pkts up to 256kB. */ + VMNET_CAP_UPT = 0x400000 /* Support UPT */ +}; + +/* + * PCI vendor and device IDs. 
+ */ +#define PCI_VENDOR_ID_VMWARE 0x15AD +#define PCI_DEVICE_ID_VMWARE_VMXNET3 0x07B0 +#define MAX_ETHERNET_CARDS 10 +#define MAX_PCI_PASSTHRU_DEVICE 6 + +struct vmxnet3_cmd_ring { + union Vmxnet3_GenericDesc *base; + u32 size; + u32 next2fill; + u32 next2comp; + u8 gen; + dma_addr_t basePA; +}; + +static inline void +vmxnet3_cmd_ring_adv_next2fill(struct vmxnet3_cmd_ring *ring) +{ + ring->next2fill++; + if (unlikely(ring->next2fill == ring->size)) { + ring->next2fill = 0; + VMXNET3_FLIP_RING_GEN(ring->gen); + } +} + +static inline void +vmxnet3_cmd_ring_adv_next2comp(struct vmxnet3_cmd_ring *ring) +{ + VMXNET3_INC_RING_IDX_ONLY(ring->next2comp, ring->size); +} + +static inline int +vmxnet3_cmd_ring_desc_avail(struct vmxnet3_cmd_ring *ring) +{ + return (ring->next2comp > ring->next2fill ? 0 : ring->size) + + ring->next2comp - ring->next2fill - 1; +} + +struct vmxnet3_comp_ring { + union Vmxnet3_GenericDesc *base; + u32 size; + u32 next2proc; + u8 gen; + u8 intr_idx; + dma_addr_t basePA; +}; + +static inline void +vmxnet3_comp_ring_adv_next2proc(struct vmxnet3_comp_ring *ring) +{ + ring->next2proc++; + if (unlikely(ring->next2proc == ring->size)) { + ring->next2proc = 0; + VMXNET3_FLIP_RING_GEN(ring->gen); + } +} + +struct vmxnet3_tx_data_ring { + struct Vmxnet3_TxDataDesc *base; + u32 size; + dma_addr_t basePA; +}; + +enum vmxnet3_buf_map_type { + VMXNET3_MAP_INVALID = 0, + VMXNET3_MAP_NONE, + VMXNET3_MAP_SINGLE, + VMXNET3_MAP_PAGE, +}; + +struct vmxnet3_tx_buf_info { + u32 map_type; + u16 len; + u16 sop_idx; + dma_addr_t dma_addr; + struct sk_buff *skb; +}; + +struct vmxnet3_tq_driver_stats { + u64 drop_total; /* # of pkts dropped by the driver, the + * counters below track droppings due to + * different reasons + */ + u64 drop_too_many_frags; + u64 drop_oversized_hdr; + u64 drop_hdr_inspect_err; + u64 drop_tso; + + u64 tx_ring_full; + u64 linearized; /* # of pkts linearized */ + u64 copy_skb_header; /* # of times we have to copy skb header */ + u64 oversized_hdr; +}; + +struct vmxnet3_tx_ctx { + bool ipv4; + u16 mss; + u32 eth_ip_hdr_size; /* only valid for pkts requesting tso or csum + * offloading + */ + u32 l4_hdr_size; /* only valid if mss != 0 */ + u32 copy_size; /* # of bytes copied into the data ring */ + union Vmxnet3_GenericDesc *sop_txd; + union Vmxnet3_GenericDesc *eop_txd; +}; + +struct vmxnet3_tx_queue { + spinlock_t tx_lock; + struct vmxnet3_cmd_ring tx_ring; + struct vmxnet3_tx_buf_info *buf_info; + struct vmxnet3_tx_data_ring data_ring; + struct vmxnet3_comp_ring comp_ring; + struct Vmxnet3_TxQueueCtrl *shared; + struct vmxnet3_tq_driver_stats stats; + bool stopped; + int num_stop; /* # of times the queue is + * stopped */ +} __attribute__((__aligned__(SMP_CACHE_BYTES))); + +enum vmxnet3_rx_buf_type { + VMXNET3_RX_BUF_NONE = 0, + VMXNET3_RX_BUF_SKB = 1, + VMXNET3_RX_BUF_PAGE = 2 +}; + +struct vmxnet3_rx_buf_info { + enum vmxnet3_rx_buf_type buf_type; + u16 len; + union { + struct sk_buff *skb; + struct page *page; + }; + dma_addr_t dma_addr; +}; + +struct vmxnet3_rx_ctx { + struct sk_buff *skb; + u32 sop_idx; +}; + +struct vmxnet3_rq_driver_stats { + u64 drop_total; + u64 drop_err; + u64 drop_fcs; + u64 rx_buf_alloc_failure; +}; + +struct vmxnet3_rx_queue { + struct vmxnet3_cmd_ring rx_ring[2]; + struct vmxnet3_comp_ring comp_ring; + struct vmxnet3_rx_ctx rx_ctx; + u32 qid; /* rqID in RCD for buffer from 1st ring */ + u32 qid2; /* rqID in RCD for buffer from 2nd ring */ + u32 uncommitted[2]; /* # of buffers allocated since last RXPROD + * update */ + struct 
vmxnet3_rx_buf_info *buf_info[2]; + struct Vmxnet3_RxQueueCtrl *shared; + struct vmxnet3_rq_driver_stats stats; +} __attribute__((__aligned__(SMP_CACHE_BYTES))); + +#define VMXNET3_LINUX_MAX_MSIX_VECT 1 + +struct vmxnet3_intr { + enum vmxnet3_intr_mask_mode mask_mode; + enum vmxnet3_intr_type type; /* MSI-X, MSI, or INTx? */ + u8 num_intrs; /* # of intr vectors */ + u8 event_intr_idx; /* idx of the intr vector for event */ + u8 mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */ +#ifdef CONFIG_PCI_MSI + struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT]; +#endif +}; + +#define VMXNET3_STATE_BIT_RESETTING 0 +#define VMXNET3_STATE_BIT_QUIESCED 1 +struct vmxnet3_adapter { + struct vmxnet3_tx_queue tx_queue; + struct vmxnet3_rx_queue rx_queue; + struct napi_struct napi; + struct vlan_group *vlan_grp; + + struct vmxnet3_intr intr; + + struct Vmxnet3_DriverShared *shared; + struct Vmxnet3_PMConf *pm_conf; + struct Vmxnet3_TxQueueDesc *tqd_start; /* first tx queue desc */ + struct Vmxnet3_RxQueueDesc *rqd_start; /* first rx queue desc */ + struct net_device *netdev; + struct pci_dev *pdev; + + u8 *hw_addr0; /* for BAR 0 */ + u8 *hw_addr1; /* for BAR 1 */ + + /* feature control */ + bool rxcsum; + bool lro; + bool jumbo_frame; + + /* rx buffer related */ + unsigned skb_buf_size; + int rx_buf_per_pkt; /* only apply to the 1st ring */ + dma_addr_t shared_pa; + dma_addr_t queue_desc_pa; + + /* Wake-on-LAN */ + u32 wol; + + /* Link speed */ + u32 link_speed; /* in mbps */ + + u64 tx_timeout_count; + struct work_struct work; + + unsigned long state; /* VMXNET3_STATE_BIT_xxx */ + + int dev_number; +}; + +#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \ + writel((val), (adapter)->hw_addr0 + (reg)) +#define VMXNET3_READ_BAR0_REG(adapter, reg) \ + readl((adapter)->hw_addr0 + (reg)) + +#define VMXNET3_WRITE_BAR1_REG(adapter, reg, val) \ + writel((val), (adapter)->hw_addr1 + (reg)) +#define VMXNET3_READ_BAR1_REG(adapter, reg) \ + readl((adapter)->hw_addr1 + (reg)) + +#define VMXNET3_WAKE_QUEUE_THRESHOLD(tq) (5) +#define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \ + ((rq)->rx_ring[ring_idx].size >> 3) + +#define VMXNET3_GET_ADDR_LO(dma) ((u32)(dma)) +#define VMXNET3_GET_ADDR_HI(dma) ((u32)(((u64)(dma)) >> 32)) + +/* must be a multiple of VMXNET3_RING_SIZE_ALIGN */ +#define VMXNET3_DEF_TX_RING_SIZE 512 +#define VMXNET3_DEF_RX_RING_SIZE 256 + +#define VMXNET3_MAX_ETH_HDR_SIZE 22 +#define VMXNET3_MAX_SKB_BUF_SIZE (3*1024) + +int +vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter); + +int +vmxnet3_activate_dev(struct vmxnet3_adapter *adapter); + +void +vmxnet3_force_close(struct vmxnet3_adapter *adapter); + +void +vmxnet3_reset_dev(struct vmxnet3_adapter *adapter); + +void +vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq, + struct vmxnet3_adapter *adapter); + +void +vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, + struct vmxnet3_adapter *adapter); + +int +vmxnet3_create_queues(struct vmxnet3_adapter *adapter, + u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size); + +extern void vmxnet3_set_ethtool_ops(struct net_device *netdev); +extern struct net_device_stats *vmxnet3_get_stats(struct net_device *netdev); + +extern char vmxnet3_driver_name[]; +#endif diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index cf5fd17ad70..f1bff98acd1 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c @@ -58,8 +58,7 @@ struct cisco_state { spinlock_t lock; unsigned long last_poll; int up; - int request_sent; - u32 txseq; /* TX sequence number */ + 
u32 txseq; /* TX sequence number, 0 = none */ u32 rxseq; /* RX sequence number */ }; @@ -163,6 +162,7 @@ static int cisco_rx(struct sk_buff *skb) struct cisco_packet *cisco_data; struct in_device *in_dev; __be32 addr, mask; + u32 ack; if (skb->len < sizeof(struct hdlc_header)) goto rx_error; @@ -223,8 +223,10 @@ static int cisco_rx(struct sk_buff *skb) case CISCO_KEEPALIVE_REQ: spin_lock(&st->lock); st->rxseq = ntohl(cisco_data->par1); - if (st->request_sent && - ntohl(cisco_data->par2) == st->txseq) { + ack = ntohl(cisco_data->par2); + if (ack && (ack == st->txseq || + /* our current REQ may be in transit */ + ack == st->txseq - 1)) { st->last_poll = jiffies; if (!st->up) { u32 sec, min, hrs, days; @@ -275,7 +277,6 @@ static void cisco_timer(unsigned long arg) cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, htonl(++st->txseq), htonl(st->rxseq)); - st->request_sent = 1; spin_unlock(&st->lock); st->timer.expires = jiffies + st->settings.interval * HZ; @@ -293,9 +294,7 @@ static void cisco_start(struct net_device *dev) unsigned long flags; spin_lock_irqsave(&st->lock, flags); - st->up = 0; - st->request_sent = 0; - st->txseq = st->rxseq = 0; + st->up = st->txseq = st->rxseq = 0; spin_unlock_irqrestore(&st->lock, flags); init_timer(&st->timer); @@ -317,8 +316,7 @@ static void cisco_stop(struct net_device *dev) spin_lock_irqsave(&st->lock, flags); netif_dormant_on(dev); - st->up = 0; - st->request_sent = 0; + st->up = st->txseq = 0; spin_unlock_irqrestore(&st->lock, flags); } diff --git a/drivers/net/wireless/adm8211.h b/drivers/net/wireless/adm8211.h index 4f6ab132218..b07e4d3a6b4 100644 --- a/drivers/net/wireless/adm8211.h +++ b/drivers/net/wireless/adm8211.h @@ -266,7 +266,7 @@ do { \ #define ADM8211_SYNCTL_CS1 (1 << 28) #define ADM8211_SYNCTL_CAL (1 << 27) #define ADM8211_SYNCTL_SELCAL (1 << 26) -#define ADM8211_SYNCTL_RFtype ((1 << 24) || (1 << 23) || (1 << 22)) +#define ADM8211_SYNCTL_RFtype ((1 << 24) | (1 << 23) | (1 << 22)) #define ADM8211_SYNCTL_RFMD (1 << 22) #define ADM8211_SYNCTL_GENERAL (0x7 << 22) /* SYNCTL 21:0 Data (Si4126: 18-bit data, 4-bit address) */ diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index 7116a1aa20c..abf896a7390 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c @@ -4790,9 +4790,8 @@ static int proc_stats_rid_open( struct inode *inode, static int get_dec_u16( char *buffer, int *start, int limit ) { u16 value; int valid = 0; - for( value = 0; buffer[*start] >= '0' && - buffer[*start] <= '9' && - *start < limit; (*start)++ ) { + for (value = 0; *start < limit && buffer[*start] >= '0' && + buffer[*start] <= '9'; (*start)++) { valid = 1; value *= 10; value += buffer[*start] - '0'; diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c index 16a271787b8..1895d63aad0 100644 --- a/drivers/net/wireless/ath/ath9k/rc.c +++ b/drivers/net/wireless/ath/ath9k/rc.c @@ -679,7 +679,7 @@ static u8 ath_rc_get_highest_rix(struct ath_softc *sc, return rate; if (rate_table->info[rate].valid_single_stream && - !(ath_rc_priv->ht_cap & WLAN_RC_DS_FLAG)); + !(ath_rc_priv->ht_cap & WLAN_RC_DS_FLAG)) return rate; /* This should not happen */ diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h index fa1549a03c7..660716214d4 100644 --- a/drivers/net/wireless/b43/b43.h +++ b/drivers/net/wireless/b43/b43.h @@ -607,82 +607,7 @@ struct b43_qos_params { struct ieee80211_tx_queue_params p; }; -struct b43_wldev; - -/* Data structure for the WLAN parts (802.11 cores) of the b43 chip. 
*/ -struct b43_wl { - /* Pointer to the active wireless device on this chip */ - struct b43_wldev *current_dev; - /* Pointer to the ieee80211 hardware data structure */ - struct ieee80211_hw *hw; - - /* Global driver mutex. Every operation must run with this mutex locked. */ - struct mutex mutex; - /* Hard-IRQ spinlock. This lock protects things used in the hard-IRQ - * handler, only. This basically is just the IRQ mask register. */ - spinlock_t hardirq_lock; - - /* The number of queues that were registered with the mac80211 subsystem - * initially. This is a backup copy of hw->queues in case hw->queues has - * to be dynamically lowered at runtime (Firmware does not support QoS). - * hw->queues has to be restored to the original value before unregistering - * from the mac80211 subsystem. */ - u16 mac80211_initially_registered_queues; - - /* We can only have one operating interface (802.11 core) - * at a time. General information about this interface follows. - */ - - struct ieee80211_vif *vif; - /* The MAC address of the operating interface. */ - u8 mac_addr[ETH_ALEN]; - /* Current BSSID */ - u8 bssid[ETH_ALEN]; - /* Interface type. (NL80211_IFTYPE_XXX) */ - int if_type; - /* Is the card operating in AP, STA or IBSS mode? */ - bool operating; - /* filter flags */ - unsigned int filter_flags; - /* Stats about the wireless interface */ - struct ieee80211_low_level_stats ieee_stats; - -#ifdef CONFIG_B43_HWRNG - struct hwrng rng; - bool rng_initialized; - char rng_name[30 + 1]; -#endif /* CONFIG_B43_HWRNG */ - - /* List of all wireless devices on this chip */ - struct list_head devlist; - u8 nr_devs; - - bool radiotap_enabled; - bool radio_enabled; - - /* The beacon we are currently using (AP or IBSS mode). */ - struct sk_buff *current_beacon; - bool beacon0_uploaded; - bool beacon1_uploaded; - bool beacon_templates_virgin; /* Never wrote the templates? */ - struct work_struct beacon_update_trigger; - - /* The current QOS parameters for the 4 queues. */ - struct b43_qos_params qos_params[4]; - - /* Work for adjustment of the transmission power. - * This is scheduled when we determine that the actual TX output - * power doesn't match what we want. */ - struct work_struct txpower_adjust_work; - - /* Packet transmit work */ - struct work_struct tx_work; - /* Queue of packets to be transmitted. */ - struct sk_buff_head tx_queue; - - /* The device LEDs. */ - struct b43_leds leds; -}; +struct b43_wl; /* The type of the firmware file. */ enum b43_firmware_file_type { @@ -824,6 +749,97 @@ struct b43_wldev { #endif }; +/* + * Include goes here to avoid a dependency problem. + * A better fix would be to integrate xmit.h into b43.h. + */ +#include "xmit.h" + +/* Data structure for the WLAN parts (802.11 cores) of the b43 chip. */ +struct b43_wl { + /* Pointer to the active wireless device on this chip */ + struct b43_wldev *current_dev; + /* Pointer to the ieee80211 hardware data structure */ + struct ieee80211_hw *hw; + + /* Global driver mutex. Every operation must run with this mutex locked. */ + struct mutex mutex; + /* Hard-IRQ spinlock. This lock protects things used in the hard-IRQ + * handler, only. This basically is just the IRQ mask register. */ + spinlock_t hardirq_lock; + + /* The number of queues that were registered with the mac80211 subsystem + * initially. This is a backup copy of hw->queues in case hw->queues has + * to be dynamically lowered at runtime (Firmware does not support QoS). 
+ * hw->queues has to be restored to the original value before unregistering + * from the mac80211 subsystem. */ + u16 mac80211_initially_registered_queues; + + /* We can only have one operating interface (802.11 core) + * at a time. General information about this interface follows. + */ + + struct ieee80211_vif *vif; + /* The MAC address of the operating interface. */ + u8 mac_addr[ETH_ALEN]; + /* Current BSSID */ + u8 bssid[ETH_ALEN]; + /* Interface type. (NL80211_IFTYPE_XXX) */ + int if_type; + /* Is the card operating in AP, STA or IBSS mode? */ + bool operating; + /* filter flags */ + unsigned int filter_flags; + /* Stats about the wireless interface */ + struct ieee80211_low_level_stats ieee_stats; + +#ifdef CONFIG_B43_HWRNG + struct hwrng rng; + bool rng_initialized; + char rng_name[30 + 1]; +#endif /* CONFIG_B43_HWRNG */ + + /* List of all wireless devices on this chip */ + struct list_head devlist; + u8 nr_devs; + + bool radiotap_enabled; + bool radio_enabled; + + /* The beacon we are currently using (AP or IBSS mode). */ + struct sk_buff *current_beacon; + bool beacon0_uploaded; + bool beacon1_uploaded; + bool beacon_templates_virgin; /* Never wrote the templates? */ + struct work_struct beacon_update_trigger; + + /* The current QOS parameters for the 4 queues. */ + struct b43_qos_params qos_params[4]; + + /* Work for adjustment of the transmission power. + * This is scheduled when we determine that the actual TX output + * power doesn't match what we want. */ + struct work_struct txpower_adjust_work; + + /* Packet transmit work */ + struct work_struct tx_work; + /* Queue of packets to be transmitted. */ + struct sk_buff_head tx_queue; + + /* The device LEDs. */ + struct b43_leds leds; + +#ifdef CONFIG_B43_PIO + /* + * RX/TX header/tail buffers used by the frame transmit functions. + */ + struct b43_rxhdr_fw4 rxhdr; + struct b43_txhdr txhdr; + u8 rx_tail[4]; + u8 tx_tail[4]; +#endif /* CONFIG_B43_PIO */ +}; + static inline struct b43_wl *hw_to_b43_wl(struct ieee80211_hw *hw) { return hw->priv; diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c index 8701034569f..de4e804bedf 100644 --- a/drivers/net/wireless/b43/dma.c +++ b/drivers/net/wireless/b43/dma.c @@ -1157,8 +1157,9 @@ struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot) } static int dma_tx_fragment(struct b43_dmaring *ring, - struct sk_buff *skb) + struct sk_buff **in_skb) { + struct sk_buff *skb = *in_skb; const struct b43_dma_ops *ops = ring->ops; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); u8 *header; @@ -1224,8 +1225,14 @@ static int dma_tx_fragment(struct b43_dmaring *ring, } memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len); + memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb)); + bounce_skb->dev = skb->dev; + skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb)); + info = IEEE80211_SKB_CB(bounce_skb); + dev_kfree_skb_any(skb); skb = bounce_skb; + *in_skb = bounce_skb; meta->skb = skb; meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { @@ -1355,7 +1362,11 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb) * static, so we don't need to store it per frame. */ ring->queue_prio = skb_get_queue_mapping(skb); - err = dma_tx_fragment(ring, skb); + /* dma_tx_fragment might reallocate the skb, so invalidate pointers pointing + * into the skb data or cb now. 
*/ + hdr = NULL; + info = NULL; + err = dma_tx_fragment(ring, &skb); if (unlikely(err == -ENOKEY)) { /* Drop this packet, as we don't have the encryption key * anymore and must not transmit it unencrypted. */ diff --git a/drivers/net/wireless/b43/leds.c b/drivers/net/wireless/b43/leds.c index fbe3d4f62ce..1e8dba48800 100644 --- a/drivers/net/wireless/b43/leds.c +++ b/drivers/net/wireless/b43/leds.c @@ -348,9 +348,9 @@ void b43_leds_register(struct b43_wldev *dev) } } -void b43_leds_unregister(struct b43_wldev *dev) +void b43_leds_unregister(struct b43_wl *wl) { - struct b43_leds *leds = &dev->wl->leds; + struct b43_leds *leds = &wl->leds; b43_unregister_led(&leds->led_tx); b43_unregister_led(&leds->led_rx); diff --git a/drivers/net/wireless/b43/leds.h b/drivers/net/wireless/b43/leds.h index 9592e4c5a5f..32b66d53cda 100644 --- a/drivers/net/wireless/b43/leds.h +++ b/drivers/net/wireless/b43/leds.h @@ -1,6 +1,7 @@ #ifndef B43_LEDS_H_ #define B43_LEDS_H_ +struct b43_wl; struct b43_wldev; #ifdef CONFIG_B43_LEDS @@ -60,7 +61,7 @@ enum b43_led_behaviour { }; void b43_leds_register(struct b43_wldev *dev); -void b43_leds_unregister(struct b43_wldev *dev); +void b43_leds_unregister(struct b43_wl *wl); void b43_leds_init(struct b43_wldev *dev); void b43_leds_exit(struct b43_wldev *dev); void b43_leds_stop(struct b43_wldev *dev); @@ -76,7 +77,7 @@ struct b43_leds { static inline void b43_leds_register(struct b43_wldev *dev) { } -static inline void b43_leds_unregister(struct b43_wldev *dev) +static inline void b43_leds_unregister(struct b43_wl *wl) { } static inline void b43_leds_init(struct b43_wldev *dev) diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 9b907a36bb8..86f35827f00 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -3874,6 +3874,7 @@ static struct b43_wldev * b43_wireless_core_stop(struct b43_wldev *dev) { struct b43_wl *wl = dev->wl; struct b43_wldev *orig_dev; + u32 mask; redo: if (!dev || b43_status(dev) < B43_STAT_STARTED) @@ -3920,7 +3921,8 @@ redo: goto redo; return dev; } - B43_WARN_ON(b43_read32(dev, B43_MMIO_GEN_IRQ_MASK)); + mask = b43_read32(dev, B43_MMIO_GEN_IRQ_MASK); + B43_WARN_ON(mask != 0xFFFFFFFF && mask); /* Drain the TX queue */ while (skb_queue_len(&wl->tx_queue)) @@ -4997,7 +4999,7 @@ static void b43_remove(struct ssb_device *dev) if (list_empty(&wl->devlist)) { b43_rng_exit(wl); - b43_leds_unregister(wldev); + b43_leds_unregister(wl); /* Last core on the chip unregistered. * We can destroy common struct b43_wl. */ diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c index 5e87650b07f..9b904440021 100644 --- a/drivers/net/wireless/b43/pio.c +++ b/drivers/net/wireless/b43/pio.c @@ -332,6 +332,7 @@ static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q, unsigned int data_len) { struct b43_wldev *dev = q->dev; + struct b43_wl *wl = dev->wl; const u8 *data = _data; ctl |= B43_PIO_TXCTL_WRITELO | B43_PIO_TXCTL_WRITEHI; @@ -341,13 +342,12 @@ static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q, q->mmio_base + B43_PIO_TXDATA, sizeof(u16)); if (data_len & 1) { - u8 tail[2] = { 0, }; - /* Write the last byte. 
*/ ctl &= ~B43_PIO_TXCTL_WRITEHI; b43_piotx_write16(q, B43_PIO_TXCTL, ctl); - tail[0] = data[data_len - 1]; - ssb_block_write(dev->dev, tail, 2, + wl->tx_tail[0] = data[data_len - 1]; + wl->tx_tail[1] = 0; + ssb_block_write(dev->dev, wl->tx_tail, 2, q->mmio_base + B43_PIO_TXDATA, sizeof(u16)); } @@ -382,6 +382,7 @@ static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q, unsigned int data_len) { struct b43_wldev *dev = q->dev; + struct b43_wl *wl = dev->wl; const u8 *data = _data; ctl |= B43_PIO8_TXCTL_0_7 | B43_PIO8_TXCTL_8_15 | @@ -392,29 +393,31 @@ static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q, q->mmio_base + B43_PIO8_TXDATA, sizeof(u32)); if (data_len & 3) { - u8 tail[4] = { 0, }; - + wl->tx_tail[3] = 0; /* Write the last few bytes. */ ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_24_31); switch (data_len & 3) { case 3: ctl |= B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_8_15; - tail[0] = data[data_len - 3]; - tail[1] = data[data_len - 2]; - tail[2] = data[data_len - 1]; + wl->tx_tail[0] = data[data_len - 3]; + wl->tx_tail[1] = data[data_len - 2]; + wl->tx_tail[2] = data[data_len - 1]; break; case 2: ctl |= B43_PIO8_TXCTL_8_15; - tail[0] = data[data_len - 2]; - tail[1] = data[data_len - 1]; + wl->tx_tail[0] = data[data_len - 2]; + wl->tx_tail[1] = data[data_len - 1]; + wl->tx_tail[2] = 0; break; case 1: - tail[0] = data[data_len - 1]; + wl->tx_tail[0] = data[data_len - 1]; + wl->tx_tail[1] = 0; + wl->tx_tail[2] = 0; break; } b43_piotx_write32(q, B43_PIO8_TXCTL, ctl); - ssb_block_write(dev->dev, tail, 4, + ssb_block_write(dev->dev, wl->tx_tail, 4, q->mmio_base + B43_PIO8_TXDATA, sizeof(u32)); } @@ -446,8 +449,9 @@ static void pio_tx_frame_4byte_queue(struct b43_pio_txpacket *pack, static int pio_tx_frame(struct b43_pio_txqueue *q, struct sk_buff *skb) { + struct b43_wldev *dev = q->dev; + struct b43_wl *wl = dev->wl; struct b43_pio_txpacket *pack; - struct b43_txhdr txhdr; u16 cookie; int err; unsigned int hdrlen; @@ -458,8 +462,8 @@ static int pio_tx_frame(struct b43_pio_txqueue *q, struct b43_pio_txpacket, list); cookie = generate_cookie(q, pack); - hdrlen = b43_txhdr_size(q->dev); - err = b43_generate_txhdr(q->dev, (u8 *)&txhdr, skb, + hdrlen = b43_txhdr_size(dev); + err = b43_generate_txhdr(dev, (u8 *)&wl->txhdr, skb, info, cookie); if (err) return err; @@ -467,15 +471,15 @@ static int pio_tx_frame(struct b43_pio_txqueue *q, if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) { /* Tell the firmware about the cookie of the last * mcast frame, so it can clear the more-data bit in it. */ - b43_shm_write16(q->dev, B43_SHM_SHARED, + b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_MCASTCOOKIE, cookie); } pack->skb = skb; if (q->rev >= 8) - pio_tx_frame_4byte_queue(pack, (const u8 *)&txhdr, hdrlen); + pio_tx_frame_4byte_queue(pack, (const u8 *)&wl->txhdr, hdrlen); else - pio_tx_frame_2byte_queue(pack, (const u8 *)&txhdr, hdrlen); + pio_tx_frame_2byte_queue(pack, (const u8 *)&wl->txhdr, hdrlen); /* Remove it from the list of available packet slots. * It will be put back when we receive the status report. */ @@ -615,14 +619,14 @@ void b43_pio_get_tx_stats(struct b43_wldev *dev, static bool pio_rx_frame(struct b43_pio_rxqueue *q) { struct b43_wldev *dev = q->dev; - struct b43_rxhdr_fw4 rxhdr; + struct b43_wl *wl = dev->wl; u16 len; u32 macstat; unsigned int i, padding; struct sk_buff *skb; const char *err_msg = NULL; - memset(&rxhdr, 0, sizeof(rxhdr)); + memset(&wl->rxhdr, 0, sizeof(wl->rxhdr)); /* Check if we have data and wait for it to get ready. 
*/ if (q->rev >= 8) { @@ -660,16 +664,16 @@ data_ready: /* Get the preamble (RX header) */ if (q->rev >= 8) { - ssb_block_read(dev->dev, &rxhdr, sizeof(rxhdr), + ssb_block_read(dev->dev, &wl->rxhdr, sizeof(wl->rxhdr), q->mmio_base + B43_PIO8_RXDATA, sizeof(u32)); } else { - ssb_block_read(dev->dev, &rxhdr, sizeof(rxhdr), + ssb_block_read(dev->dev, &wl->rxhdr, sizeof(wl->rxhdr), q->mmio_base + B43_PIO_RXDATA, sizeof(u16)); } /* Sanity checks. */ - len = le16_to_cpu(rxhdr.frame_len); + len = le16_to_cpu(wl->rxhdr.frame_len); if (unlikely(len > 0x700)) { err_msg = "len > 0x700"; goto rx_error; @@ -679,7 +683,7 @@ data_ready: goto rx_error; } - macstat = le32_to_cpu(rxhdr.mac_status); + macstat = le32_to_cpu(wl->rxhdr.mac_status); if (macstat & B43_RX_MAC_FCSERR) { if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) { /* Drop frames with failed FCS. */ @@ -704,24 +708,22 @@ data_ready: q->mmio_base + B43_PIO8_RXDATA, sizeof(u32)); if (len & 3) { - u8 tail[4] = { 0, }; - /* Read the last few bytes. */ - ssb_block_read(dev->dev, tail, 4, + ssb_block_read(dev->dev, wl->rx_tail, 4, q->mmio_base + B43_PIO8_RXDATA, sizeof(u32)); switch (len & 3) { case 3: - skb->data[len + padding - 3] = tail[0]; - skb->data[len + padding - 2] = tail[1]; - skb->data[len + padding - 1] = tail[2]; + skb->data[len + padding - 3] = wl->rx_tail[0]; + skb->data[len + padding - 2] = wl->rx_tail[1]; + skb->data[len + padding - 1] = wl->rx_tail[2]; break; case 2: - skb->data[len + padding - 2] = tail[0]; - skb->data[len + padding - 1] = tail[1]; + skb->data[len + padding - 2] = wl->rx_tail[0]; + skb->data[len + padding - 1] = wl->rx_tail[1]; break; case 1: - skb->data[len + padding - 1] = tail[0]; + skb->data[len + padding - 1] = wl->rx_tail[0]; break; } } @@ -730,17 +732,15 @@ data_ready: q->mmio_base + B43_PIO_RXDATA, sizeof(u16)); if (len & 1) { - u8 tail[2] = { 0, }; - /* Read the last byte. 
*/ - ssb_block_read(dev->dev, tail, 2, + ssb_block_read(dev->dev, wl->rx_tail, 2, q->mmio_base + B43_PIO_RXDATA, sizeof(u16)); - skb->data[len + padding - 1] = tail[0]; + skb->data[len + padding - 1] = wl->rx_tail[0]; } } - b43_rx(q->dev, skb, &rxhdr); + b43_rx(q->dev, skb, &wl->rxhdr); return 1; diff --git a/drivers/net/wireless/b43/rfkill.c b/drivers/net/wireless/b43/rfkill.c index 7a3218c5ba7..ffdce6f3c90 100644 --- a/drivers/net/wireless/b43/rfkill.c +++ b/drivers/net/wireless/b43/rfkill.c @@ -33,7 +33,8 @@ bool b43_is_hw_radio_enabled(struct b43_wldev *dev) & B43_MMIO_RADIO_HWENABLED_HI_MASK)) return 1; } else { - if (b43_read16(dev, B43_MMIO_RADIO_HWENABLED_LO) + if (b43_status(dev) >= B43_STAT_STARTED && + b43_read16(dev, B43_MMIO_RADIO_HWENABLED_LO) & B43_MMIO_RADIO_HWENABLED_LO_MASK) return 1; } diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c index ac9f600995e..f4e9695ec18 100644 --- a/drivers/net/wireless/b43/xmit.c +++ b/drivers/net/wireless/b43/xmit.c @@ -27,7 +27,7 @@ */ -#include "xmit.h" +#include "b43.h" #include "phy_common.h" #include "dma.h" #include "pio.h" @@ -690,7 +690,10 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr) } memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); + + local_bh_disable(); ieee80211_rx(dev->wl->hw, skb); + local_bh_enable(); #if B43_DEBUG dev->rx_count++; diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c index 240cff1e697..a741d37fd96 100644 --- a/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/ipw2x00/ipw2100.c @@ -6325,8 +6325,10 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev, fail: if (dev) { - if (registered) + if (registered) { + unregister_ieee80211(priv->ieee); unregister_netdev(dev); + } ipw2100_hw_stop_adapter(priv); @@ -6383,6 +6385,7 @@ static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev) /* Unregister the device first - this results in close() * being called if the device is open. If we free storage * first, then close() will crash. 
*/ + unregister_ieee80211(priv->ieee); unregister_netdev(dev); /* ipw2100_down will ensure that there is no more pending work diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c index 827824d45de..9b0f2c0646e 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/ipw2x00/ipw2200.c @@ -11822,6 +11822,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev, if (err) { IPW_ERROR("Failed to register promiscuous network " "device (error %d).\n", err); + unregister_ieee80211(priv->ieee); unregister_netdev(priv->net_dev); goto out_remove_sysfs; } @@ -11872,6 +11873,7 @@ static void __devexit ipw_pci_remove(struct pci_dev *pdev) mutex_unlock(&priv->mutex); + unregister_ieee80211(priv->ieee); unregister_netdev(priv->net_dev); if (priv->rxq) { diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h index bf45391172f..f42ade6c2d3 100644 --- a/drivers/net/wireless/ipw2x00/libipw.h +++ b/drivers/net/wireless/ipw2x00/libipw.h @@ -1020,6 +1020,7 @@ static inline int libipw_is_cck_rate(u8 rate) /* ieee80211.c */ extern void free_ieee80211(struct net_device *dev, int monitor); extern struct net_device *alloc_ieee80211(int sizeof_priv, int monitor); +extern void unregister_ieee80211(struct libipw_device *ieee); extern int libipw_change_mtu(struct net_device *dev, int new_mtu); extern void libipw_networks_age(struct libipw_device *ieee, diff --git a/drivers/net/wireless/ipw2x00/libipw_module.c b/drivers/net/wireless/ipw2x00/libipw_module.c index a0e9f6aed7d..be5b809ec97 100644 --- a/drivers/net/wireless/ipw2x00/libipw_module.c +++ b/drivers/net/wireless/ipw2x00/libipw_module.c @@ -235,16 +235,19 @@ void free_ieee80211(struct net_device *dev, int monitor) libipw_networks_free(ieee); /* free cfg80211 resources */ - if (!monitor) { - wiphy_unregister(ieee->wdev.wiphy); - kfree(ieee->a_band.channels); - kfree(ieee->bg_band.channels); + if (!monitor) wiphy_free(ieee->wdev.wiphy); - } free_netdev(dev); } +void unregister_ieee80211(struct libipw_device *ieee) +{ + wiphy_unregister(ieee->wdev.wiphy); + kfree(ieee->a_band.channels); + kfree(ieee->bg_band.channels); +} + #ifdef CONFIG_LIBIPW_DEBUG static int debug = 0; @@ -330,3 +333,4 @@ module_init(libipw_init); EXPORT_SYMBOL(alloc_ieee80211); EXPORT_SYMBOL(free_ieee80211); +EXPORT_SYMBOL(unregister_ieee80211); diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c index a16bd4147ea..cbb0585083a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c @@ -702,7 +702,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, u8 sta_id = iwl_find_station(priv, hdr->addr1); if (sta_id == IWL_INVALID_STATION) { - IWL_DEBUG_RATE(priv, "LQ: ADD station %pm\n", + IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n", hdr->addr1); sta_id = iwl_add_station(priv, hdr->addr1, false, CMD_ASYNC, NULL); diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c index 68136172b82..f059b49dc69 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c @@ -611,7 +611,7 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv, if (rx_status.band == IEEE80211_BAND_5GHZ) rx_status.rate_idx -= IWL_FIRST_OFDM_RATE; - rx_status.antenna = le16_to_cpu(rx_hdr->phy_flags & + rx_status.antenna = (le16_to_cpu(rx_hdr->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4; /* set the preamble flag if appropriate */ diff --git 
a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index d6bc0e05104..6e6f516ba40 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c @@ -318,7 +318,7 @@ static void iwl5000_gain_computation(struct iwl_priv *priv, (s32)average_noise[i])) / 1500; /* bound gain by 2 bits value max, 3rd bit is sign */ data->delta_gain_code[i] = - min(abs(delta_g), CHAIN_NOISE_MAX_DELTA_GAIN_CODE); + min(abs(delta_g), (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE); if (delta_g < 0) /* set negative sign */ diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index 313d3e5ee84..eaafae091f5 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -3106,8 +3106,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) out_pci_disable_device: pci_disable_device(pdev); out_ieee80211_free_hw: - ieee80211_free_hw(priv->hw); iwl_free_traffic_mem(priv); + ieee80211_free_hw(priv->hw); out: return err; } diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h index 2c5c88fc38f..4afaf773aea 100644 --- a/drivers/net/wireless/iwlwifi/iwl-commands.h +++ b/drivers/net/wireless/iwlwifi/iwl-commands.h @@ -1154,7 +1154,7 @@ struct iwl_wep_cmd { #define RX_RES_PHY_FLAGS_MOD_CCK_MSK cpu_to_le16(1 << 1) #define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK cpu_to_le16(1 << 2) #define RX_RES_PHY_FLAGS_NARROW_BAND_MSK cpu_to_le16(1 << 3) -#define RX_RES_PHY_FLAGS_ANTENNA_MSK cpu_to_le16(0xf0) +#define RX_RES_PHY_FLAGS_ANTENNA_MSK 0xf0 #define RX_RES_PHY_FLAGS_ANTENNA_POS 4 #define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8) diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c index 3d2b93a61e6..e14c9952a93 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c +++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c @@ -410,7 +410,6 @@ static int iwl_find_otp_image(struct iwl_priv *priv, u16 *validblockaddr) { u16 next_link_addr = 0, link_value = 0, valid_addr; - int ret = 0; int usedblocks = 0; /* set addressing mode to absolute to traverse the link list */ @@ -430,29 +429,29 @@ static int iwl_find_otp_image(struct iwl_priv *priv, * check for more block on the link list */ valid_addr = next_link_addr; - next_link_addr = link_value; + next_link_addr = link_value * sizeof(u16); IWL_DEBUG_INFO(priv, "OTP blocks %d addr 0x%x\n", usedblocks, next_link_addr); if (iwl_read_otp_word(priv, next_link_addr, &link_value)) return -EINVAL; if (!link_value) { /* - * reach the end of link list, + * reach the end of link list, return success and * set address point to the starting address * of the image */ - goto done; + *validblockaddr = valid_addr; + /* skip first 2 bytes (link list pointer) */ + *validblockaddr += 2; + return 0; } /* more in the link list, continue */ usedblocks++; - } while (usedblocks < priv->cfg->max_ll_items); - /* OTP full, use last block */ - IWL_DEBUG_INFO(priv, "OTP is full, use last block\n"); -done: - *validblockaddr = valid_addr; - /* skip first 2 bytes (link list pointer) */ - *validblockaddr += 2; - return ret; + } while (usedblocks <= priv->cfg->max_ll_items); + + /* OTP has no valid blocks */ + IWL_DEBUG_INFO(priv, "OTP has no valid blocks\n"); + return -EINVAL; } /** diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h index 6b68db7b1b8..80b9e45d9b9 100644 --- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h +++ 
b/drivers/net/wireless/iwlwifi/iwl-eeprom.h @@ -220,35 +220,35 @@ struct iwl_eeprom_enhanced_txpwr { * Section 10: 2.4 GHz 40MHz channels: 132, 44 (_above_) */ /* 2.4 GHz band: CCK */ -#define EEPROM_LB_CCK_20_COMMON ((0xAA)\ +#define EEPROM_LB_CCK_20_COMMON ((0xA8)\ | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 8 bytes */ /* 2.4 GHz band: 20MHz-Legacy, 20MHz-HT, 40MHz-HT */ -#define EEPROM_LB_OFDM_COMMON ((0xB2)\ +#define EEPROM_LB_OFDM_COMMON ((0xB0)\ | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */ /* 5.2 GHz band: 20MHz-Legacy, 20MHz-HT, 40MHz-HT */ -#define EEPROM_HB_OFDM_COMMON ((0xCA)\ +#define EEPROM_HB_OFDM_COMMON ((0xC8)\ | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */ /* 2.4GHz band channels: * 1Legacy, 1HT, 2Legacy, 2HT, 10Legacy, 10HT, 11Legacy, 11HT */ -#define EEPROM_LB_OFDM_20_BAND ((0xE2)\ +#define EEPROM_LB_OFDM_20_BAND ((0xE0)\ | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 64 bytes */ /* 2.4 GHz band HT40 channels: (1,+1) (2,+1) (6,+1) (7,+1) (9,+1) */ -#define EEPROM_LB_OFDM_HT40_BAND ((0x122)\ +#define EEPROM_LB_OFDM_HT40_BAND ((0x120)\ | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 40 bytes */ /* 5.2GHz band channels: 36Legacy, 36HT, 64Legacy, 64HT, 100Legacy, 100HT */ -#define EEPROM_HB_OFDM_20_BAND ((0x14A)\ +#define EEPROM_HB_OFDM_20_BAND ((0x148)\ | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 48 bytes */ /* 5.2 GHz band HT40 channels: (36,+1) (60,+1) (100,+1) */ -#define EEPROM_HB_OFDM_HT40_BAND ((0x17A)\ +#define EEPROM_HB_OFDM_HT40_BAND ((0x178)\ | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */ /* 2.4 GHz band, channnel 13: Legacy, HT */ -#define EEPROM_LB_OFDM_20_CHANNEL_13 ((0x192)\ +#define EEPROM_LB_OFDM_20_CHANNEL_13 ((0x190)\ | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 16 bytes */ /* 5.2 GHz band, channnel 140: Legacy, HT */ -#define EEPROM_HB_OFDM_20_CHANNEL_140 ((0x1A2)\ +#define EEPROM_HB_OFDM_20_CHANNEL_140 ((0x1A0)\ | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 16 bytes */ /* 5.2 GHz band, HT40 channnels (132,+1) (44,+1) */ -#define EEPROM_HB_OFDM_HT40_BAND_1 ((0x1B2)\ +#define EEPROM_HB_OFDM_HT40_BAND_1 ((0x1B0)\ | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 16 bytes */ diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c index 8e1bb53c0aa..493626bcd3e 100644 --- a/drivers/net/wireless/iwlwifi/iwl-rx.c +++ b/drivers/net/wireless/iwlwifi/iwl-rx.c @@ -1044,7 +1044,7 @@ void iwl_rx_reply_rx(struct iwl_priv *priv, * as a bitmask. */ rx_status.antenna = - le16_to_cpu(phy_res->phy_flags & RX_RES_PHY_FLAGS_ANTENNA_MSK) + (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >> RX_RES_PHY_FLAGS_ANTENNA_POS; /* set the preamble flag if appropriate */ diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c index aa49230422f..d00a8033409 100644 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c @@ -4097,8 +4097,8 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e pci_set_drvdata(pdev, NULL); pci_disable_device(pdev); out_ieee80211_free_hw: - ieee80211_free_hw(priv->hw); iwl_free_traffic_mem(priv); + ieee80211_free_hw(priv->hw); out: return err; } diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c index c42d3faa266..23f684337fd 100644 --- a/drivers/net/wireless/libertas/cmdresp.c +++ b/drivers/net/wireless/libertas/cmdresp.c @@ -3,6 +3,7 @@ * responses as well as events generated by firmware. 
*/ #include <linux/delay.h> +#include <linux/sched.h> #include <linux/if_arp.h> #include <linux/netdevice.h> #include <asm/unaligned.h> diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c index cb8be8d7abc..5b3672c4d0c 100644 --- a/drivers/net/wireless/libertas/if_spi.c +++ b/drivers/net/wireless/libertas/if_spi.c @@ -134,7 +134,7 @@ static void spu_transaction_finish(struct if_spi_card *card) static int spu_write(struct if_spi_card *card, u16 reg, const u8 *buf, int len) { int err = 0; - u16 reg_out = cpu_to_le16(reg | IF_SPI_WRITE_OPERATION_MASK); + __le16 reg_out = cpu_to_le16(reg | IF_SPI_WRITE_OPERATION_MASK); struct spi_message m; struct spi_transfer reg_trans; struct spi_transfer data_trans; @@ -166,7 +166,7 @@ static int spu_write(struct if_spi_card *card, u16 reg, const u8 *buf, int len) static inline int spu_write_u16(struct if_spi_card *card, u16 reg, u16 val) { - u16 buff; + __le16 buff; buff = cpu_to_le16(val); return spu_write(card, reg, (u8 *)&buff, sizeof(u16)); @@ -188,7 +188,7 @@ static int spu_read(struct if_spi_card *card, u16 reg, u8 *buf, int len) { unsigned int delay; int err = 0; - u16 reg_out = cpu_to_le16(reg | IF_SPI_READ_OPERATION_MASK); + __le16 reg_out = cpu_to_le16(reg | IF_SPI_READ_OPERATION_MASK); struct spi_message m; struct spi_transfer reg_trans; struct spi_transfer dummy_trans; @@ -235,7 +235,7 @@ static int spu_read(struct if_spi_card *card, u16 reg, u8 *buf, int len) /* Read 16 bits from an SPI register */ static inline int spu_read_u16(struct if_spi_card *card, u16 reg, u16 *val) { - u16 buf; + __le16 buf; int ret; ret = spu_read(card, reg, (u8 *)&buf, sizeof(buf)); @@ -248,7 +248,7 @@ static inline int spu_read_u16(struct if_spi_card *card, u16 reg, u16 *val) * The low 16 bits are read first. */ static int spu_read_u32(struct if_spi_card *card, u16 reg, u32 *val) { - u32 buf; + __le32 buf; int err; err = spu_read(card, reg, (u8 *)&buf, sizeof(buf)); diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c index 92bc8c5f1ca..3fac4efa5ac 100644 --- a/drivers/net/wireless/libertas/if_usb.c +++ b/drivers/net/wireless/libertas/if_usb.c @@ -508,7 +508,7 @@ static int __if_usb_submit_rx_urb(struct if_usb_card *cardp, /* Fill the receive configuration URB and initialise the Rx call back */ usb_fill_bulk_urb(cardp->rx_urb, cardp->udev, usb_rcvbulkpipe(cardp->udev, cardp->ep_in), - (void *) (skb->tail + (size_t) IPFIELD_ALIGN_OFFSET), + skb->data + IPFIELD_ALIGN_OFFSET, MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn, cardp); diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index 88cd58eb3b9..1c88c2ea59a 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c @@ -2879,7 +2879,7 @@ static int write_essid(struct file *file, const char __user *buffer, unsigned long count, void *data) { static char proc_essid[33]; - int len = count; + unsigned int len = count; if (len > 32) len = 32; diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index a084077a1c6..9fe770f7d7b 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c @@ -1994,7 +1994,7 @@ static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev, rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->ba_size); rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID, test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ? 
- (skbdesc->entry->entry_idx + 1) : 0xff); + txdesc->key_idx : 0xff); rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT, skb->len - txdesc->l2pad); rt2x00_set_field32(&word, TXWI_W1_PACKETID, diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c index b8f5ee33445..14e7bb21007 100644 --- a/drivers/net/wireless/rt2x00/rt73usb.c +++ b/drivers/net/wireless/rt2x00/rt73usb.c @@ -2389,10 +2389,13 @@ static struct usb_device_id rt73usb_device_table[] = { { USB_DEVICE(0x13b1, 0x0023), USB_DEVICE_DATA(&rt73usb_ops) }, { USB_DEVICE(0x13b1, 0x0028), USB_DEVICE_DATA(&rt73usb_ops) }, /* MSI */ + { USB_DEVICE(0x0db0, 0x4600), USB_DEVICE_DATA(&rt73usb_ops) }, { USB_DEVICE(0x0db0, 0x6877), USB_DEVICE_DATA(&rt73usb_ops) }, { USB_DEVICE(0x0db0, 0x6874), USB_DEVICE_DATA(&rt73usb_ops) }, { USB_DEVICE(0x0db0, 0xa861), USB_DEVICE_DATA(&rt73usb_ops) }, { USB_DEVICE(0x0db0, 0xa874), USB_DEVICE_DATA(&rt73usb_ops) }, + /* Ovislink */ + { USB_DEVICE(0x1b75, 0x7318), USB_DEVICE_DATA(&rt73usb_ops) }, /* Ralink */ { USB_DEVICE(0x04bb, 0x093d), USB_DEVICE_DATA(&rt73usb_ops) }, { USB_DEVICE(0x148f, 0x2573), USB_DEVICE_DATA(&rt73usb_ops) }, @@ -2420,6 +2423,8 @@ static struct usb_device_id rt73usb_device_table[] = { /* Planex */ { USB_DEVICE(0x2019, 0xab01), USB_DEVICE_DATA(&rt73usb_ops) }, { USB_DEVICE(0x2019, 0xab50), USB_DEVICE_DATA(&rt73usb_ops) }, + /* WideTell */ + { USB_DEVICE(0x7167, 0x3840), USB_DEVICE_DATA(&rt73usb_ops) }, /* Zcom */ { USB_DEVICE(0x0cde, 0x001c), USB_DEVICE_DATA(&rt73usb_ops) }, /* ZyXEL */ diff --git a/drivers/net/znet.c b/drivers/net/znet.c index a0384b6f09b..b4234733375 100644 --- a/drivers/net/znet.c +++ b/drivers/net/znet.c @@ -169,7 +169,6 @@ static void znet_tx_timeout (struct net_device *dev); static int znet_request_resources (struct net_device *dev) { struct znet_private *znet = netdev_priv(dev); - unsigned long flags; if (request_irq (dev->irq, &znet_interrupt, 0, "ZNet", dev)) goto failed; @@ -187,13 +186,9 @@ static int znet_request_resources (struct net_device *dev) free_sia: release_region (znet->sia_base, znet->sia_size); free_tx_dma: - flags = claim_dma_lock(); free_dma (znet->tx_dma); - release_dma_lock (flags); free_rx_dma: - flags = claim_dma_lock(); free_dma (znet->rx_dma); - release_dma_lock (flags); free_irq: free_irq (dev->irq, dev); failed: @@ -203,14 +198,11 @@ static int znet_request_resources (struct net_device *dev) static void znet_release_resources (struct net_device *dev) { struct znet_private *znet = netdev_priv(dev); - unsigned long flags; release_region (znet->sia_base, znet->sia_size); release_region (dev->base_addr, znet->io_size); - flags = claim_dma_lock(); free_dma (znet->tx_dma); free_dma (znet->rx_dma); - release_dma_lock (flags); free_irq (dev->irq, dev); } diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index bacaa536fd5..4b22ba568b1 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -97,6 +97,12 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) } EXPORT_SYMBOL(of_mdiobus_register); +/* Helper function for of_phy_find_device */ +static int of_phy_match(struct device *dev, void *phy_np) +{ + return dev_archdata_get_node(&dev->archdata) == phy_np; +} + /** * of_phy_find_device - Give a PHY node, find the phy_device * @phy_np: Pointer to the phy's device tree node @@ -106,15 +112,10 @@ EXPORT_SYMBOL(of_mdiobus_register); struct phy_device *of_phy_find_device(struct device_node *phy_np) { struct device *d; - int match(struct device *dev, void *phy_np) - { - return 
dev_archdata_get_node(&dev->archdata) == phy_np; - } - if (!phy_np) return NULL; - d = bus_find_device(&mdio_bus_type, NULL, phy_np, match); + d = bus_find_device(&mdio_bus_type, NULL, phy_np, of_phy_match); return d ? to_phy_device(d) : NULL; } EXPORT_SYMBOL(of_phy_find_device); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index a790b1771f9..245d2cdb476 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -1009,7 +1009,7 @@ DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, static void __devinit quirk_amd_ide_mode(struct pci_dev *pdev) { - /* set SBX00 SATA in IDE mode to AHCI mode */ + /* set SBX00/Hudson-2 SATA in IDE mode to AHCI mode */ u8 tmp; pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &tmp); @@ -1028,8 +1028,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SB900_SATA_IDE, quirk_amd_ide_mode); -DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SB900_SATA_IDE, quirk_amd_ide_mode); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode); /* * Serverworks CSB5 IDE does not fully support native mode diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 0959430534b..cb1a027eb55 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -299,17 +299,8 @@ static struct resource *find_free_bus_resource(struct pci_bus *bus, unsigned lon r = bus->resource[i]; if (r == &ioport_resource || r == &iomem_resource) continue; - if (r && (r->flags & type_mask) == type) { - if (!r->parent) - return r; - /* - * if there is no child under that, we should release - * and use it. 
don't need to reset it, pbus_size_* will - * set it again - */ - if (!r->child && !release_resource(r)) - return r; - } + if (r && (r->flags & type_mask) == type && !r->parent) + return r; } return NULL; } diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c index 4a110b7b267..6c4a4fc8363 100644 --- a/drivers/pcmcia/cistpl.c +++ b/drivers/pcmcia/cistpl.c @@ -1463,7 +1463,9 @@ int pccard_read_tuple(struct pcmcia_socket *s, unsigned int function, cisdata_t return -ENOMEM; } tuple.DesiredTuple = code; - tuple.Attributes = TUPLE_RETURN_COMMON; + tuple.Attributes = 0; + if (function == BIND_FN_ALL) + tuple.Attributes = TUPLE_RETURN_COMMON; ret = pccard_get_first_tuple(s, function, &tuple); if (ret != 0) goto done; @@ -1490,7 +1492,7 @@ EXPORT_SYMBOL(pccard_read_tuple); ======================================================================*/ -int pccard_validate_cis(struct pcmcia_socket *s, unsigned int function, unsigned int *info) +int pccard_validate_cis(struct pcmcia_socket *s, unsigned int *info) { tuple_t *tuple; cisparse_t *p; @@ -1515,30 +1517,30 @@ int pccard_validate_cis(struct pcmcia_socket *s, unsigned int function, unsigned count = reserved = 0; tuple->DesiredTuple = RETURN_FIRST_TUPLE; tuple->Attributes = TUPLE_RETURN_COMMON; - ret = pccard_get_first_tuple(s, function, tuple); + ret = pccard_get_first_tuple(s, BIND_FN_ALL, tuple); if (ret != 0) goto done; /* First tuple should be DEVICE; we should really have either that or a CFTABLE_ENTRY of some sort */ if ((tuple->TupleCode == CISTPL_DEVICE) || - (pccard_read_tuple(s, function, CISTPL_CFTABLE_ENTRY, p) == 0) || - (pccard_read_tuple(s, function, CISTPL_CFTABLE_ENTRY_CB, p) == 0)) + (pccard_read_tuple(s, BIND_FN_ALL, CISTPL_CFTABLE_ENTRY, p) == 0) || + (pccard_read_tuple(s, BIND_FN_ALL, CISTPL_CFTABLE_ENTRY_CB, p) == 0)) dev_ok++; /* All cards should have a MANFID tuple, and/or a VERS_1 or VERS_2 tuple, for card identification. Certain old D-Link and Linksys cards have only a broken VERS_2 tuple; hence the bogus test. 
*/ - if ((pccard_read_tuple(s, function, CISTPL_MANFID, p) == 0) || - (pccard_read_tuple(s, function, CISTPL_VERS_1, p) == 0) || - (pccard_read_tuple(s, function, CISTPL_VERS_2, p) != -ENOSPC)) + if ((pccard_read_tuple(s, BIND_FN_ALL, CISTPL_MANFID, p) == 0) || + (pccard_read_tuple(s, BIND_FN_ALL, CISTPL_VERS_1, p) == 0) || + (pccard_read_tuple(s, BIND_FN_ALL, CISTPL_VERS_2, p) != -ENOSPC)) ident_ok++; if (!dev_ok && !ident_ok) goto done; for (count = 1; count < MAX_TUPLES; count++) { - ret = pccard_get_next_tuple(s, function, tuple); + ret = pccard_get_next_tuple(s, BIND_FN_ALL, tuple); if (ret != 0) break; if (((tuple->TupleCode > 0x23) && (tuple->TupleCode < 0x40)) || diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c index 934d4bee39a..698d75cda08 100644 --- a/drivers/pcmcia/cs.c +++ b/drivers/pcmcia/cs.c @@ -98,10 +98,13 @@ EXPORT_SYMBOL(pcmcia_socket_list_rwsem); * These functions check for the appropriate struct pcmcia_soket arrays, * and pass them to the low-level functions pcmcia_{suspend,resume}_socket */ +static int socket_early_resume(struct pcmcia_socket *skt); +static int socket_late_resume(struct pcmcia_socket *skt); static int socket_resume(struct pcmcia_socket *skt); static int socket_suspend(struct pcmcia_socket *skt); -int pcmcia_socket_dev_suspend(struct device *dev) +static void pcmcia_socket_dev_run(struct device *dev, + int (*cb)(struct pcmcia_socket *)) { struct pcmcia_socket *socket; @@ -110,29 +113,34 @@ int pcmcia_socket_dev_suspend(struct device *dev) if (socket->dev.parent != dev) continue; mutex_lock(&socket->skt_mutex); - socket_suspend(socket); + cb(socket); mutex_unlock(&socket->skt_mutex); } up_read(&pcmcia_socket_list_rwsem); +} +int pcmcia_socket_dev_suspend(struct device *dev) +{ + pcmcia_socket_dev_run(dev, socket_suspend); return 0; } EXPORT_SYMBOL(pcmcia_socket_dev_suspend); -int pcmcia_socket_dev_resume(struct device *dev) +void pcmcia_socket_dev_early_resume(struct device *dev) { - struct pcmcia_socket *socket; + pcmcia_socket_dev_run(dev, socket_early_resume); +} +EXPORT_SYMBOL(pcmcia_socket_dev_early_resume); - down_read(&pcmcia_socket_list_rwsem); - list_for_each_entry(socket, &pcmcia_socket_list, socket_list) { - if (socket->dev.parent != dev) - continue; - mutex_lock(&socket->skt_mutex); - socket_resume(socket); - mutex_unlock(&socket->skt_mutex); - } - up_read(&pcmcia_socket_list_rwsem); +void pcmcia_socket_dev_late_resume(struct device *dev) +{ + pcmcia_socket_dev_run(dev, socket_late_resume); +} +EXPORT_SYMBOL(pcmcia_socket_dev_late_resume); +int pcmcia_socket_dev_resume(struct device *dev) +{ + pcmcia_socket_dev_run(dev, socket_resume); return 0; } EXPORT_SYMBOL(pcmcia_socket_dev_resume); @@ -546,29 +554,24 @@ static int socket_suspend(struct pcmcia_socket *skt) return 0; } -/* - * Resume a socket. If a card is present, verify its CIS against - * our cached copy. If they are different, the card has been - * replaced, and we need to tell the drivers. 
- */ -static int socket_resume(struct pcmcia_socket *skt) +static int socket_early_resume(struct pcmcia_socket *skt) { - int ret; - - if (!(skt->state & SOCKET_SUSPEND)) - return -EBUSY; - skt->socket = dead_socket; skt->ops->init(skt); skt->ops->set_socket(skt, &skt->socket); + if (skt->state & SOCKET_PRESENT) + skt->resume_status = socket_setup(skt, resume_delay); + return 0; +} +static int socket_late_resume(struct pcmcia_socket *skt) +{ if (!(skt->state & SOCKET_PRESENT)) { skt->state &= ~SOCKET_SUSPEND; return socket_insert(skt); } - ret = socket_setup(skt, resume_delay); - if (ret == 0) { + if (skt->resume_status == 0) { /* * FIXME: need a better check here for cardbus cards. */ @@ -596,6 +599,20 @@ static int socket_resume(struct pcmcia_socket *skt) return 0; } +/* + * Resume a socket. If a card is present, verify its CIS against + * our cached copy. If they are different, the card has been + * replaced, and we need to tell the drivers. + */ +static int socket_resume(struct pcmcia_socket *skt) +{ + if (!(skt->state & SOCKET_SUSPEND)) + return -EBUSY; + + socket_early_resume(skt); + return socket_late_resume(skt); +} + static void socket_remove(struct pcmcia_socket *skt) { dev_printk(KERN_NOTICE, &skt->dev, diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h index 79615e6d540..1f4098f1354 100644 --- a/drivers/pcmcia/cs_internal.h +++ b/drivers/pcmcia/cs_internal.h @@ -197,8 +197,7 @@ int pccard_read_tuple(struct pcmcia_socket *s, unsigned int function, cisdata_t code, void *parse); int pcmcia_replace_cis(struct pcmcia_socket *s, const u8 *data, const size_t len); -int pccard_validate_cis(struct pcmcia_socket *s, unsigned int function, - unsigned int *count); +int pccard_validate_cis(struct pcmcia_socket *s, unsigned int *count); /* rsrc_mgr.c */ int pcmcia_validate_mem(struct pcmcia_socket *s); diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c index 9f300d3cb12..f5b7079f13d 100644 --- a/drivers/pcmcia/ds.c +++ b/drivers/pcmcia/ds.c @@ -547,7 +547,7 @@ static int pcmcia_device_query(struct pcmcia_device *p_dev) if (!vers1) return -ENOMEM; - if (!pccard_read_tuple(p_dev->socket, p_dev->func, + if (!pccard_read_tuple(p_dev->socket, BIND_FN_ALL, CISTPL_MANFID, &manf_id)) { p_dev->manf_id = manf_id.manf; p_dev->card_id = manf_id.card; @@ -581,9 +581,9 @@ static int pcmcia_device_query(struct pcmcia_device *p_dev) kfree(devgeo); } - if (!pccard_read_tuple(p_dev->socket, p_dev->func, CISTPL_VERS_1, + if (!pccard_read_tuple(p_dev->socket, BIND_FN_ALL, CISTPL_VERS_1, vers1)) { - for (i=0; i < vers1->ns; i++) { + for (i = 0; i < min_t(unsigned int, 4, vers1->ns); i++) { char *tmp; unsigned int length; @@ -733,7 +733,7 @@ static int pcmcia_card_add(struct pcmcia_socket *s) return -EAGAIN; /* try again, but later... 
*/ } - ret = pccard_validate_cis(s, BIND_FN_ALL, &no_chains); + ret = pccard_validate_cis(s, &no_chains); if (ret || !no_chains) { ds_dev_dbg(0, &s->dev, "invalid CIS or invalid resources\n"); return -ENODEV; diff --git a/drivers/pcmcia/i82365.c b/drivers/pcmcia/i82365.c index b906abe26ad..a4aacb830b8 100644 --- a/drivers/pcmcia/i82365.c +++ b/drivers/pcmcia/i82365.c @@ -1053,8 +1053,8 @@ static int i365_set_io_map(u_short sock, struct pccard_io_map *io) u_char map, ioctl; debug(1, "SetIOMap(%d, %d, %#2.2x, %d ns, " - "%#x-%#x)\n", sock, io->map, io->flags, - io->speed, io->start, io->stop); + "%#llx-%#llx)\n", sock, io->map, io->flags, io->speed, + (unsigned long long)io->start, (unsigned long long)io->stop); map = io->map; if ((map > 1) || (io->start > 0xffff) || (io->stop > 0xffff) || (io->stop < io->start)) return -EINVAL; diff --git a/drivers/pcmcia/m32r_cfc.c b/drivers/pcmcia/m32r_cfc.c index d1d89c4491a..7dfbee1dcd7 100644 --- a/drivers/pcmcia/m32r_cfc.c +++ b/drivers/pcmcia/m32r_cfc.c @@ -537,8 +537,9 @@ static int _pcc_set_io_map(u_short sock, struct pccard_io_map *io) u_char map; debug(3, "m32r_cfc: SetIOMap(%d, %d, %#2.2x, %d ns, " - "%#lx-%#lx)\n", sock, io->map, io->flags, - io->speed, io->start, io->stop); + "%#llx-%#llx)\n", sock, io->map, io->flags, + io->speed, (unsigned long long)io->start, + (unsigned long long)io->stop); map = io->map; return 0; @@ -554,8 +555,9 @@ static int _pcc_set_mem_map(u_short sock, struct pccard_mem_map *mem) pcc_socket_t *t = &socket[sock]; debug(3, "m32r_cfc: SetMemMap(%d, %d, %#2.2x, %d ns, " - "%#lx, %#x)\n", sock, map, mem->flags, - mem->speed, mem->static_start, mem->card_start); + "%#llx, %#x)\n", sock, map, mem->flags, + mem->speed, (unsigned long long)mem->static_start, + mem->card_start); /* * sanity check diff --git a/drivers/pcmcia/m32r_pcc.c b/drivers/pcmcia/m32r_pcc.c index a0655839c8d..c6524f99ccc 100644 --- a/drivers/pcmcia/m32r_pcc.c +++ b/drivers/pcmcia/m32r_pcc.c @@ -492,8 +492,9 @@ static int _pcc_set_io_map(u_short sock, struct pccard_io_map *io) u_char map; debug(3, "m32r-pcc: SetIOMap(%d, %d, %#2.2x, %d ns, " - "%#x-%#x)\n", sock, io->map, io->flags, - io->speed, io->start, io->stop); + "%#llx-%#llx)\n", sock, io->map, io->flags, + io->speed, (unsigned long long)io->start, + (unsigned long long)io->stop); map = io->map; return 0; @@ -515,8 +516,9 @@ static int _pcc_set_mem_map(u_short sock, struct pccard_mem_map *mem) #endif debug(3, "m32r-pcc: SetMemMap(%d, %d, %#2.2x, %d ns, " - "%#lx, %#x)\n", sock, map, mem->flags, - mem->speed, mem->static_start, mem->card_start); + "%#llx, %#x)\n", sock, map, mem->flags, + mem->speed, (unsigned long long)mem->static_start, + mem->card_start); /* * sanity check diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c index c69f2c4fe52..403559ba49d 100644 --- a/drivers/pcmcia/m8xx_pcmcia.c +++ b/drivers/pcmcia/m8xx_pcmcia.c @@ -975,8 +975,9 @@ static int m8xx_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io) #define M8XX_BASE (PCMCIA_IO_WIN_BASE + io->start) dprintk("SetIOMap(%d, %d, %#2.2x, %d ns, " - "%#4.4x-%#4.4x)\n", lsock, io->map, io->flags, - io->speed, io->start, io->stop); + "%#4.4llx-%#4.4llx)\n", lsock, io->map, io->flags, + io->speed, (unsigned long long)io->start, + (unsigned long long)io->stop); if ((io->map >= PCMCIA_IO_WIN_NO) || (io->start > 0xffff) || (io->stop > 0xffff) || (io->stop < io->start)) @@ -1055,8 +1056,9 @@ static int m8xx_set_mem_map(struct pcmcia_socket *sock, pcmconf8xx_t *pcmcia = s->pcmcia; dprintk("SetMemMap(%d, %d, 
%#2.2x, %d ns, " - "%#5.5lx, %#5.5x)\n", lsock, mem->map, mem->flags, - mem->speed, mem->static_start, mem->card_start); + "%#5.5llx, %#5.5x)\n", lsock, mem->map, mem->flags, + mem->speed, (unsigned long long)mem->static_start, + mem->card_start); if ((mem->map >= PCMCIA_MEM_WIN_NO) // || ((mem->s) >= PCMCIA_MEM_WIN_SIZE) @@ -1107,8 +1109,9 @@ static int m8xx_set_mem_map(struct pcmcia_socket *sock, } dprintk("SetMemMap(%d, %d, %#2.2x, %d ns, " - "%#5.5lx, %#5.5x)\n", lsock, mem->map, mem->flags, - mem->speed, mem->static_start, mem->card_start); + "%#5.5llx, %#5.5x)\n", lsock, mem->map, mem->flags, + mem->speed, (unsigned long long)mem->static_start, + mem->card_start); /* copy the struct and modify the copy */ diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c index 32c44040c1e..30cf71d2ee2 100644 --- a/drivers/pcmcia/pcmcia_ioctl.c +++ b/drivers/pcmcia/pcmcia_ioctl.c @@ -881,7 +881,7 @@ static int ds_ioctl(struct inode * inode, struct file * file, mutex_lock(&s->skt_mutex); pcmcia_validate_mem(s); mutex_unlock(&s->skt_mutex); - ret = pccard_validate_cis(s, BIND_FN_ALL, &buf->cisinfo.Chains); + ret = pccard_validate_cis(s, &buf->cisinfo.Chains); break; case DS_SUSPEND_CARD: ret = pcmcia_suspend_card(s); diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c index 1c39d3438f2..70a33468bcd 100644 --- a/drivers/pcmcia/pd6729.c +++ b/drivers/pcmcia/pd6729.c @@ -641,6 +641,12 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev, if ((ret = pci_enable_device(dev))) goto err_out_free_mem; + if (!pci_resource_start(dev, 0)) { + printk(KERN_INFO "pd6729: refusing to load the driver " + "as the io_base is 0.\n"); + goto err_out_free_mem; + } + printk(KERN_INFO "pd6729: Cirrus PD6729 PCI to PCMCIA Bridge " "at 0x%llx on irq %d\n", (unsigned long long)pci_resource_start(dev, 0), dev->irq); diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c index 9ca22c7aafb..7039f3cf5b7 100644 --- a/drivers/pcmcia/rsrc_nonstatic.c +++ b/drivers/pcmcia/rsrc_nonstatic.c @@ -206,6 +206,7 @@ static void do_io_probe(struct pcmcia_socket *s, unsigned int base, /* First, what does a floating port look like? 
*/ b = kzalloc(256, GFP_KERNEL); if (!b) { + printk("\n"); dev_printk(KERN_ERR, &s->dev, "do_io_probe: unable to kmalloc 256 bytes"); return; @@ -275,7 +276,7 @@ static int readable(struct pcmcia_socket *s, struct resource *res, s->cis_mem.res = res; s->cis_virt = ioremap(res->start, s->map_size); if (s->cis_virt) { - ret = pccard_validate_cis(s, BIND_FN_ALL, count); + ret = pccard_validate_cis(s, count); /* invalidate mapping and CIS cache */ iounmap(s->cis_virt); s->cis_virt = NULL; diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c index 163cf98e238..ef7e9e58782 100644 --- a/drivers/pcmcia/soc_common.c +++ b/drivers/pcmcia/soc_common.c @@ -336,8 +336,9 @@ soc_common_pcmcia_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *m struct soc_pcmcia_socket *skt = to_soc_pcmcia_socket(sock); unsigned short speed = map->speed; - debug(skt, 2, "map %u speed %u start 0x%08x stop 0x%08x\n", - map->map, map->speed, map->start, map->stop); + debug(skt, 2, "map %u speed %u start 0x%08llx stop 0x%08llx\n", + map->map, map->speed, (unsigned long long)map->start, + (unsigned long long)map->stop); debug(skt, 2, "flags: %s%s%s%s%s%s%s%s\n", (map->flags==0)?"<NONE>":"", (map->flags&MAP_ACTIVE)?"ACTIVE ":"", diff --git a/drivers/pcmcia/socket_sysfs.c b/drivers/pcmcia/socket_sysfs.c index ff9a3bb3c88..78d5aab542f 100644 --- a/drivers/pcmcia/socket_sysfs.c +++ b/drivers/pcmcia/socket_sysfs.c @@ -300,7 +300,7 @@ static ssize_t pccard_show_cis(struct kobject *kobj, if (!(s->state & SOCKET_PRESENT)) return -ENODEV; - if (pccard_validate_cis(s, BIND_FN_ALL, &chains)) + if (pccard_validate_cis(s, &chains)) return -EIO; if (!chains) return -ENODATA; diff --git a/drivers/pcmcia/tcic.c b/drivers/pcmcia/tcic.c index 582413fcb62..6918849d511 100644 --- a/drivers/pcmcia/tcic.c +++ b/drivers/pcmcia/tcic.c @@ -732,8 +732,8 @@ static int tcic_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *io) u_short base, len, ioctl; debug(1, "SetIOMap(%d, %d, %#2.2x, %d ns, " - "%#x-%#x)\n", psock, io->map, io->flags, - io->speed, io->start, io->stop); + "%#llx-%#llx)\n", psock, io->map, io->flags, io->speed, + (unsigned long long)io->start, (unsigned long long)io->stop); if ((io->map > 1) || (io->start > 0xffff) || (io->stop > 0xffff) || (io->stop < io->start)) return -EINVAL; tcic_setw(TCIC_ADDR+2, TCIC_ADR2_INDREG | (psock << TCIC_SS_SHFT)); diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c index abe0e44c6e9..8be4cc447a1 100644 --- a/drivers/pcmcia/yenta_socket.c +++ b/drivers/pcmcia/yenta_socket.c @@ -1275,16 +1275,26 @@ static int yenta_dev_resume_noirq(struct device *dev) if (socket->type && socket->type->restore_state) socket->type->restore_state(socket); - return pcmcia_socket_dev_resume(dev); + pcmcia_socket_dev_early_resume(dev); + return 0; +} + +static int yenta_dev_resume(struct device *dev) +{ + pcmcia_socket_dev_late_resume(dev); + return 0; } static struct dev_pm_ops yenta_pm_ops = { .suspend_noirq = yenta_dev_suspend_noirq, .resume_noirq = yenta_dev_resume_noirq, + .resume = yenta_dev_resume, .freeze_noirq = yenta_dev_suspend_noirq, .thaw_noirq = yenta_dev_resume_noirq, + .thaw = yenta_dev_resume, .poweroff_noirq = yenta_dev_suspend_noirq, .restore_noirq = yenta_dev_resume_noirq, + .restore = yenta_dev_resume, }; #define YENTA_PM_OPS (&yenta_pm_ops) diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c index 749e2102b2b..d379e74a05d 100644 --- a/drivers/platform/x86/eeepc-laptop.c +++ b/drivers/platform/x86/eeepc-laptop.c @@
-150,6 +150,8 @@ struct eeepc_hotk { /* The actual device the driver binds to */ static struct eeepc_hotk *ehotk; +static void eeepc_rfkill_hotplug(bool real); + /* Platform device/driver */ static int eeepc_hotk_thaw(struct device *device); static int eeepc_hotk_restore(struct device *device); @@ -343,14 +345,23 @@ static bool eeepc_wlan_rfkill_blocked(void) static int eeepc_rfkill_set(void *data, bool blocked) { unsigned long asl = (unsigned long)data; - return set_acpi(asl, !blocked); + int ret; + + if (asl != CM_ASL_WLAN) + return set_acpi(asl, !blocked); + + /* hack to avoid panic with rt2860sta */ + if (blocked) + eeepc_rfkill_hotplug(false); + ret = set_acpi(asl, !blocked); + return ret; } static const struct rfkill_ops eeepc_rfkill_ops = { .set_block = eeepc_rfkill_set, }; -static void __init eeepc_enable_camera(void) +static void __devinit eeepc_enable_camera(void) { /* * If the following call to set_acpi() fails, it's because there's no @@ -643,13 +654,13 @@ static int eeepc_get_adapter_status(struct hotplug_slot *hotplug_slot, return 0; } -static void eeepc_rfkill_hotplug(void) +static void eeepc_rfkill_hotplug(bool real) { struct pci_dev *dev; struct pci_bus *bus; - bool blocked = eeepc_wlan_rfkill_blocked(); + bool blocked = real ? eeepc_wlan_rfkill_blocked() : true; - if (ehotk->wlan_rfkill) + if (real && ehotk->wlan_rfkill) rfkill_set_sw_state(ehotk->wlan_rfkill, blocked); mutex_lock(&ehotk->hotplug_lock); @@ -692,7 +703,7 @@ static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data) if (event != ACPI_NOTIFY_BUS_CHECK) return; - eeepc_rfkill_hotplug(); + eeepc_rfkill_hotplug(true); } static void eeepc_hotk_notify(struct acpi_device *device, u32 event) @@ -850,7 +861,7 @@ static int eeepc_hotk_restore(struct device *device) { /* Refresh both wlan rfkill state and pci hotplug */ if (ehotk->wlan_rfkill) - eeepc_rfkill_hotplug(); + eeepc_rfkill_hotplug(true); if (ehotk->bluetooth_rfkill) rfkill_set_sw_state(ehotk->bluetooth_rfkill, @@ -993,7 +1004,7 @@ static void eeepc_rfkill_exit(void) * Refresh pci hotplug in case the rfkill state was changed after * eeepc_unregister_rfkill_notifier() */ - eeepc_rfkill_hotplug(); + eeepc_rfkill_hotplug(true); if (ehotk->hotplug_slot) pci_hp_deregister(ehotk->hotplug_slot); @@ -1109,7 +1120,7 @@ static int eeepc_rfkill_init(struct device *dev) * Refresh pci hotplug in case the rfkill state was changed during * setup. 
*/ - eeepc_rfkill_hotplug(); + eeepc_rfkill_hotplug(true); exit: if (result && result != -ENODEV) @@ -1189,7 +1200,7 @@ static int eeepc_input_init(struct device *dev) return 0; } -static int eeepc_hotk_add(struct acpi_device *device) +static int __devinit eeepc_hotk_add(struct acpi_device *device) { struct device *dev; int result; diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c index f35aee5c214..bcd4ba8be7d 100644 --- a/drivers/platform/x86/fujitsu-laptop.c +++ b/drivers/platform/x86/fujitsu-laptop.c @@ -944,7 +944,7 @@ static int acpi_fujitsu_hotkey_remove(struct acpi_device *device, int type) struct fujitsu_hotkey_t *fujitsu_hotkey = acpi_driver_data(device); struct input_dev *input = fujitsu_hotkey->input; -#ifdef CONFIG_LEDS_CLASS +#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) if (fujitsu_hotkey->logolamp_registered) led_classdev_unregister(&logolamp_led); diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c index a4f68e5b9c9..b44462a6c6d 100644 --- a/drivers/s390/char/sclp_async.c +++ b/drivers/s390/char/sclp_async.c @@ -26,7 +26,6 @@ static struct sclp_async_sccb *sccb; static int sclp_async_send_wait(char *message); static struct ctl_table_header *callhome_sysctl_header; static DEFINE_SPINLOCK(sclp_async_lock); -static char nodename[64]; #define SCLP_NORMAL_WRITE 0x00 struct async_evbuf { @@ -52,9 +51,10 @@ static struct sclp_register sclp_async_register = { static int call_home_on_panic(struct notifier_block *self, unsigned long event, void *data) { - strncat(data, nodename, strlen(nodename)); - sclp_async_send_wait(data); - return NOTIFY_DONE; + strncat(data, init_utsname()->nodename, + sizeof(init_utsname()->nodename)); + sclp_async_send_wait(data); + return NOTIFY_DONE; } static struct notifier_block call_home_panic_nb = { @@ -68,15 +68,14 @@ static int proc_handler_callhome(struct ctl_table *ctl, int write, { unsigned long val; int len, rc; - char buf[2]; + char buf[3]; - if (!*count | (*ppos && !write)) { + if (!*count || (*ppos && !write)) { *count = 0; return 0; } if (!write) { - len = sprintf(buf, "%d\n", callhome_enabled); - buf[len] = '\0'; + len = snprintf(buf, sizeof(buf), "%d\n", callhome_enabled); rc = copy_to_user(buffer, buf, sizeof(buf)); if (rc != 0) return -EFAULT; @@ -171,39 +170,29 @@ static int __init sclp_async_init(void) rc = sclp_register(&sclp_async_register); if (rc) return rc; - callhome_sysctl_header = register_sysctl_table(kern_dir_table); - if (!callhome_sysctl_header) { - rc = -ENOMEM; - goto out_sclp; - } - if (!(sclp_async_register.sclp_receive_mask & EVTYP_ASYNC_MASK)) { - rc = -EOPNOTSUPP; + rc = -EOPNOTSUPP; + if (!(sclp_async_register.sclp_receive_mask & EVTYP_ASYNC_MASK)) goto out_sclp; - } rc = -ENOMEM; + callhome_sysctl_header = register_sysctl_table(kern_dir_table); + if (!callhome_sysctl_header) + goto out_sclp; request = kzalloc(sizeof(struct sclp_req), GFP_KERNEL); - if (!request) - goto out_sys; sccb = (struct sclp_async_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA); - if (!sccb) + if (!request || !sccb) goto out_mem; - rc = atomic_notifier_chain_register(&panic_notifier_list, - &call_home_panic_nb); - if (rc) - goto out_mem; - - strncpy(nodename, init_utsname()->nodename, 64); - return 0; - + rc = atomic_notifier_chain_register(&panic_notifier_list, + &call_home_panic_nb); + if (!rc) + goto out; out_mem: kfree(request); free_page((unsigned long) sccb); -out_sys: unregister_sysctl_table(callhome_sysctl_header); out_sclp: 
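The proc_handler_callhome hunk above makes two small but classic fixes: the flag is now formatted with a bounded snprintf into a buffer sized for digit, newline and NUL, and a bitwise '|' that should have been a logical '||' is corrected. A minimal userspace sketch of both points follows; the variable names are illustrative.

#include <stdio.h>
#include <string.h>

static int callhome_enabled = 1;

static int format_flag(char *dst, size_t dstlen)
{
        /* Needs 3 bytes: one digit, '\n', and the terminating NUL. */
        return snprintf(dst, dstlen, "%d\n", callhome_enabled);
}

int main(void)
{
        char buf[3];
        size_t count = 0;
        long ppos = 5;
        int write = 0;

        format_flag(buf, sizeof(buf));
        printf("formatted %zu bytes: %s", strlen(buf), buf);

        /* '!count | (ppos && !write)' would bitwise-OR two ints; the intent
         * is a short-circuit test of either condition. */
        if (!count || (ppos && !write))
                printf("nothing to do for this request\n");
        return 0;
}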
sclp_unregister(&sclp_async_register); +out: return rc; - } module_init(sclp_async_init); diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c index 102000d1af6..3012355f830 100644 --- a/drivers/s390/net/smsgiucv.c +++ b/drivers/s390/net/smsgiucv.c @@ -158,7 +158,12 @@ static int smsg_pm_restore_thaw(struct device *dev) smsg_path->flags = 0; rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ", NULL, NULL, NULL); - printk(KERN_ERR "iucv_path_connect returned with rc %i\n", rc); +#ifdef CONFIG_PM_DEBUG + if (rc) + printk(KERN_ERR + "iucv_path_connect returned with rc %i\n", rc); +#endif + cpcmd("SET SMSG IUCV", NULL, 0, NULL); } return 0; } diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 0f79f3af4f5..2889e5f2dfd 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -128,12 +128,13 @@ out_ccwdev: static void __init zfcp_init_device_setup(char *devstr) { char *token; - char *str; + char *str, *str_saved; char busid[ZFCP_BUS_ID_SIZE]; u64 wwpn, lun; /* duplicate devstr and keep the original for sysfs presentation*/ - str = kmalloc(strlen(devstr) + 1, GFP_KERNEL); + str_saved = kmalloc(strlen(devstr) + 1, GFP_KERNEL); + str = str_saved; if (!str) return; @@ -152,12 +153,12 @@ static void __init zfcp_init_device_setup(char *devstr) if (!token || strict_strtoull(token, 0, (unsigned long long *) &lun)) goto err_out; - kfree(str); + kfree(str_saved); zfcp_init_device_configure(busid, wwpn, lun); return; - err_out: - kfree(str); +err_out: + kfree(str_saved); pr_err("%s is not a valid SCSI device\n", devstr); } diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 73d366ba31e..f73e2180f33 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -858,10 +858,7 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act) if (fc_host_port_type(adapter->scsi_host) == FC_PORTTYPE_PTP) return zfcp_erp_open_ptp_port(act); if (!port->d_id) { - zfcp_port_get(port); - if (!queue_work(adapter->work_queue, - &port->gid_pn_work)) - zfcp_port_put(port); + zfcp_fc_trigger_did_lookup(port); return ZFCP_ERP_EXIT; } return zfcp_erp_port_strategy_open_port(act); @@ -869,12 +866,11 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act) case ZFCP_ERP_STEP_PORT_OPENING: /* D_ID might have changed during open */ if (p_status & ZFCP_STATUS_COMMON_OPEN) { - if (port->d_id) - return ZFCP_ERP_SUCCEEDED; - else { - act->step = ZFCP_ERP_STEP_PORT_CLOSING; - return ZFCP_ERP_CONTINUES; + if (!port->d_id) { + zfcp_fc_trigger_did_lookup(port); + return ZFCP_ERP_EXIT; } + return ZFCP_ERP_SUCCEEDED; } if (port->d_id && !(p_status & ZFCP_STATUS_COMMON_NOESC)) { port->d_id = 0; @@ -889,19 +885,21 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act) static int zfcp_erp_port_strategy(struct zfcp_erp_action *erp_action) { struct zfcp_port *port = erp_action->port; + int p_status = atomic_read(&port->status); - if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_NOESC) + if ((p_status & ZFCP_STATUS_COMMON_NOESC) && + !(p_status & ZFCP_STATUS_COMMON_OPEN)) goto close_init_done; switch (erp_action->step) { case ZFCP_ERP_STEP_UNINITIALIZED: zfcp_erp_port_strategy_clearstati(port); - if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN) + if (p_status & ZFCP_STATUS_COMMON_OPEN) return zfcp_erp_port_strategy_close(erp_action); break; case ZFCP_ERP_STEP_PORT_CLOSING: - if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_OPEN) + if (p_status & 
ZFCP_STATUS_COMMON_OPEN) return ZFCP_ERP_FAILED; break; } diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 629edec7040..b3f28deb450 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -96,6 +96,7 @@ extern int zfcp_fc_scan_ports(struct zfcp_adapter *); extern void _zfcp_fc_scan_ports_later(struct work_struct *); extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *); extern void zfcp_fc_port_did_lookup(struct work_struct *); +extern void zfcp_fc_trigger_did_lookup(struct zfcp_port *); extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *); extern void zfcp_fc_test_link(struct zfcp_port *); extern void zfcp_fc_link_test_work(struct work_struct *); diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 722f22de875..df23bcead23 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -361,6 +361,17 @@ out: } /** + * zfcp_fc_trigger_did_lookup - trigger the d_id lookup using a GID_PN request + * @port: The zfcp_port to lookup the d_id for. + */ +void zfcp_fc_trigger_did_lookup(struct zfcp_port *port) +{ + zfcp_port_get(port); + if (!queue_work(port->adapter->work_queue, &port->gid_pn_work)) + zfcp_port_put(port); +} + +/** * zfcp_fc_plogi_evaluate - evaluate PLOGI playload * @port: zfcp_port structure * @plogi: plogi payload diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 38a7e4a6b63..4e41baa0c14 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -1079,7 +1079,7 @@ static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req, /* common settings for ct/gs and els requests */ req->qtcb->bottom.support.service_class = FSF_CLASS_3; req->qtcb->bottom.support.timeout = 2 * R_A_TOV; - zfcp_fsf_start_timer(req, 2 * R_A_TOV + 10); + zfcp_fsf_start_timer(req, (2 * R_A_TOV + 10) * HZ); return 0; } @@ -1475,9 +1475,16 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req) plogi = (struct fsf_plogi *) req->qtcb->bottom.support.els; if (req->qtcb->bottom.support.els1_length >= FSF_PLOGI_MIN_LEN) { - if (plogi->serv_param.wwpn != port->wwpn) + if (plogi->serv_param.wwpn != port->wwpn) { port->d_id = 0; - else { + dev_warn(&port->adapter->ccw_device->dev, + "A port opened with WWPN 0x%016Lx " + "returned data that identifies it as " + "WWPN 0x%016Lx\n", + (unsigned long long) port->wwpn, + (unsigned long long) + plogi->serv_param.wwpn); + } else { port->wwnn = plogi->serv_param.wwnn; zfcp_fc_plogi_evaluate(port, plogi); } diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index 079a8cf518a..d31000886ca 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -224,6 +224,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev, zfcp_erp_unit_reopen(unit, 0, "syuas_1", NULL); zfcp_erp_wait(unit->port->adapter); + flush_work(&unit->scsi_work); zfcp_unit_put(unit); out: mutex_unlock(&zfcp_data.config_mutex); diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c index b6af63ca980..496764349c4 100644 --- a/drivers/scsi/dpt_i2o.c +++ b/drivers/scsi/dpt_i2o.c @@ -1918,6 +1918,10 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg) } size = size>>16; size *= 4; + if (size > MAX_MESSAGE_SIZE) { + rcode = -EINVAL; + goto cleanup; + } /* Copy in the user's I2O command */ if (copy_from_user (msg, user_msg, size)) { rcode = -EFAULT; diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index c4478380140..0547a7f44d4 100644 --- 
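The dpt_i2o hunk above derives a message length from a user-supplied header and now rejects it before copying if it exceeds the driver's buffer. Below is a userspace model of that bound-check-before-copy pattern using memcpy in place of copy_from_user; the MAX_MESSAGE_SIZE value and header layout here are assumptions for illustration only.

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>

#define MAX_MESSAGE_SIZE 264    /* assumed size of the destination buffer */

static int copy_user_message(uint32_t *msg, const uint32_t *user_msg)
{
        uint32_t size;

        /* The claimed size sits in the upper 16 bits of the first word and
         * is expressed in 32-bit words, as in the hunk above. */
        size = user_msg[0] >> 16;
        size *= 4;
        if (size > MAX_MESSAGE_SIZE)
                return -EINVAL; /* reject before touching msg */

        memcpy(msg, user_msg, size);
        return 0;
}

int main(void)
{
        uint32_t user_msg[8] = { (uint32_t)8 << 16 };   /* claims 8 words */
        uint32_t msg[MAX_MESSAGE_SIZE / 4];

        printf("copy rc = %d\n", copy_user_message(msg, user_msg));
        user_msg[0] = (uint32_t)0x4000 << 16;           /* oversized claim */
        printf("copy rc = %d\n", copy_user_message(msg, user_msg));
        return 0;
}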
a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -317,6 +317,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, out_device_destroy: scsi_device_set_state(sdev, SDEV_DEL); transport_destroy_device(&sdev->sdev_gendev); + put_device(&sdev->sdev_dev); put_device(&sdev->sdev_gendev); out: if (display_failure_msg) @@ -957,6 +958,7 @@ static inline void scsi_destroy_sdev(struct scsi_device *sdev) if (sdev->host->hostt->slave_destroy) sdev->host->hostt->slave_destroy(sdev); transport_destroy_device(&sdev->sdev_gendev); + put_device(&sdev->sdev_dev); put_device(&sdev->sdev_gendev); } diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index fde54537d71..5c7eb63a19d 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -864,10 +864,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev) goto clean_device; } - /* take a reference for the sdev_dev; this is - * released by the sdev_class .release */ - get_device(&sdev->sdev_gendev); - /* create queue files, which may be writable, depending on the host */ if (sdev->host->hostt->change_queue_depth) error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_depth_rw); @@ -917,6 +913,7 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev) device_del(&sdev->sdev_gendev); transport_destroy_device(&sdev->sdev_gendev); + put_device(&sdev->sdev_dev); put_device(&sdev->sdev_gendev); return error; @@ -1065,7 +1062,7 @@ void scsi_sysfs_device_initialize(struct scsi_device *sdev) sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); device_initialize(&sdev->sdev_dev); - sdev->sdev_dev.parent = &sdev->sdev_gendev; + sdev->sdev_dev.parent = get_device(&sdev->sdev_gendev); sdev->sdev_dev.class = &sdev_class; dev_set_name(&sdev->sdev_dev, "%d:%d:%d:%d", sdev->host->host_no, sdev->channel, sdev->id, sdev->lun); diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c index e7108e75653..42e8550cd2b 100644 --- a/drivers/serial/8250_pci.c +++ b/drivers/serial/8250_pci.c @@ -1561,11 +1561,16 @@ enum pci_board_num_t { pbn_exar_XR17C152, pbn_exar_XR17C154, pbn_exar_XR17C158, + pbn_exar_ibm_saturn, pbn_pasemi_1682M, pbn_ni8430_2, pbn_ni8430_4, pbn_ni8430_8, pbn_ni8430_16, + pbn_ADDIDATA_PCIe_1_3906250, + pbn_ADDIDATA_PCIe_2_3906250, + pbn_ADDIDATA_PCIe_4_3906250, + pbn_ADDIDATA_PCIe_8_3906250, }; /* @@ -2146,6 +2151,13 @@ static struct pciserial_board pci_boards[] __devinitdata = { .base_baud = 921600, .uart_offset = 0x200, }, + [pbn_exar_ibm_saturn] = { + .flags = FL_BASE0, + .num_ports = 1, + .base_baud = 921600, + .uart_offset = 0x200, + }, + /* * PA Semi PWRficient PA6T-1682M on-chip UART */ @@ -2185,6 +2197,37 @@ static struct pciserial_board pci_boards[] __devinitdata = { .uart_offset = 0x10, .first_offset = 0x800, }, + /* + * ADDI-DATA GmbH PCI-Express communication cards <info@addi-data.com> + */ + [pbn_ADDIDATA_PCIe_1_3906250] = { + .flags = FL_BASE0, + .num_ports = 1, + .base_baud = 3906250, + .uart_offset = 0x200, + .first_offset = 0x1000, + }, + [pbn_ADDIDATA_PCIe_2_3906250] = { + .flags = FL_BASE0, + .num_ports = 2, + .base_baud = 3906250, + .uart_offset = 0x200, + .first_offset = 0x1000, + }, + [pbn_ADDIDATA_PCIe_4_3906250] = { + .flags = FL_BASE0, + .num_ports = 4, + .base_baud = 3906250, + .uart_offset = 0x200, + .first_offset = 0x1000, + }, + [pbn_ADDIDATA_PCIe_8_3906250] = { + .flags = FL_BASE0, + .num_ports = 8, + .base_baud = 3906250, + .uart_offset = 0x200, + .first_offset = 0x1000, + }, }; static const struct pci_device_id softmodem_blacklist[] = { @@ -2649,6 +2692,9 
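The scsi_scan and scsi_sysfs hunks above change the lifetime rule so that the child object takes its own reference on the parent when it records the pointer, and every teardown path drops exactly that reference. The userspace sketch below models the get/put pairing with a hypothetical refcounted object; it is not the driver-core API.

#include <stdio.h>
#include <stdlib.h>

struct obj {
        int refcount;
        const char *name;
};

static struct obj *obj_get(struct obj *o) { o->refcount++; return o; }

static void obj_put(struct obj *o)
{
        if (--o->refcount == 0) {
                printf("releasing %s\n", o->name);
                free(o);
        }
}

struct child {
        struct obj *parent;
};

static void child_init(struct child *c, struct obj *parent)
{
        /* Pin the parent for as long as the child points at it. */
        c->parent = obj_get(parent);
}

static void child_destroy(struct child *c)
{
        obj_put(c->parent);     /* pairs with the get in child_init() */
}

int main(void)
{
        struct obj *gendev = malloc(sizeof(*gendev));
        struct child sdev_dev;

        gendev->refcount = 1;
        gendev->name = "sdev_gendev";

        child_init(&sdev_dev, gendev);
        obj_put(gendev);                /* creator's original reference */
        child_destroy(&sdev_dev);       /* last reference, freed here */
        return 0;
}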
@@ static struct pci_device_id serial_pci_tbl[] = { PCI_SUBVENDOR_ID_CONNECT_TECH, PCI_SUBDEVICE_ID_CONNECT_TECH_PCI_UART_8_485, 0, 0, pbn_b0_8_1843200_200 }, + { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17C152, + PCI_VENDOR_ID_IBM, PCI_SUBDEVICE_ID_IBM_SATURN_SERIAL_ONE_PORT, + 0, 0, pbn_exar_ibm_saturn }, { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_U530, PCI_ANY_ID, PCI_ANY_ID, 0, 0, @@ -3556,6 +3602,38 @@ static struct pci_device_id serial_pci_tbl[] = { 0, pbn_b0_8_115200 }, + { PCI_VENDOR_ID_ADDIDATA, + PCI_DEVICE_ID_ADDIDATA_APCIe7500, + PCI_ANY_ID, + PCI_ANY_ID, + 0, + 0, + pbn_ADDIDATA_PCIe_4_3906250 }, + + { PCI_VENDOR_ID_ADDIDATA, + PCI_DEVICE_ID_ADDIDATA_APCIe7420, + PCI_ANY_ID, + PCI_ANY_ID, + 0, + 0, + pbn_ADDIDATA_PCIe_2_3906250 }, + + { PCI_VENDOR_ID_ADDIDATA, + PCI_DEVICE_ID_ADDIDATA_APCIe7300, + PCI_ANY_ID, + PCI_ANY_ID, + 0, + 0, + pbn_ADDIDATA_PCIe_1_3906250 }, + + { PCI_VENDOR_ID_ADDIDATA, + PCI_DEVICE_ID_ADDIDATA_APCIe7800, + PCI_ANY_ID, + PCI_ANY_ID, + 0, + 0, + pbn_ADDIDATA_PCIe_8_3906250 }, + { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9835, PCI_VENDOR_ID_IBM, 0x0299, 0, 0, pbn_b0_bt_2_115200 }, diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c index 3551c5cb709..9d948bccafa 100644 --- a/drivers/serial/atmel_serial.c +++ b/drivers/serial/atmel_serial.c @@ -1531,7 +1531,7 @@ static int __devinit atmel_serial_probe(struct platform_device *pdev) void *data; int ret; - BUILD_BUG_ON(!is_power_of_2(ATMEL_SERIAL_RINGSIZE)); + BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1)); port = &atmel_ports[pdev->id]; port->backup_imr = 0; diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c index d7bcd074d38..7ce9e9f567a 100644 --- a/drivers/serial/mpc52xx_uart.c +++ b/drivers/serial/mpc52xx_uart.c @@ -705,7 +705,7 @@ mpc52xx_uart_verify_port(struct uart_port *port, struct serial_struct *ser) return -EINVAL; if ((ser->irq != port->irq) || - (ser->io_type != SERIAL_IO_MEM) || + (ser->io_type != UPIO_MEM) || (ser->baud_base != port->uartclk) || (ser->iomem_base != (void *)port->mapbase) || (ser->hub6 != 0)) diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c index ff4617e2142..7c7914f5fa0 100644 --- a/drivers/serial/serial_cs.c +++ b/drivers/serial/serial_cs.c @@ -879,10 +879,10 @@ static struct pcmcia_device_id serial_ids[] = { PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0175, 0x0000, "cis/DP83903.cis"), PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "cis/3CXEM556.cis"), PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "cis/3CXEM556.cis"), - PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC850", 0xd85f6206, 0x42a2c018, "SW_8xx_SER.cis"), /* Sierra Wireless AC850 3G Network Adapter R1 */ - PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0x0710, "SW_7xx_SER.cis"), /* Sierra Wireless AC710/AC750 GPRS Network Adapter R1 */ - PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0xa555, "SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- pre update */ - PCMCIA_DEVICE_CIS_MANF_CARD(0x013f, 0xa555, "SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- post update */ + PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC850", 0xd85f6206, 0x42a2c018, "cis/SW_8xx_SER.cis"), /* Sierra Wireless AC850 3G Network Adapter R1 */ + PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC710/AC750", 0xd85f6206, 0x761b11e0, "cis/SW_7xx_SER.cis"), /* Sierra Wireless AC710/AC750 GPRS Network Adapter R1 */ + PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0xa555, "cis/SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- pre update */ + 
PCMCIA_DEVICE_CIS_MANF_CARD(0x013f, 0xa555, "cis/SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- post update */ PCMCIA_DEVICE_CIS_PROD_ID12("MultiTech", "PCMCIA 56K DataFax", 0x842047ee, 0xc2efcf03, "cis/MT5634ZLX.cis"), PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-2", 0x96913a85, 0x27ab5437, "cis/COMpad2.cis"), PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "cis/COMpad4.cis"), diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 7df3ba4f1f4..d21b3469f6d 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -93,8 +93,6 @@ source "drivers/staging/dst/Kconfig" source "drivers/staging/pohmelfs/Kconfig" -source "drivers/staging/stlc45xx/Kconfig" - source "drivers/staging/b3dfg/Kconfig" source "drivers/staging/phison/Kconfig" diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index 74757117226..8cbf1aebea2 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -29,7 +29,6 @@ obj-$(CONFIG_ANDROID) += android/ obj-$(CONFIG_ANDROID) += dream/ obj-$(CONFIG_DST) += dst/ obj-$(CONFIG_POHMELFS) += pohmelfs/ -obj-$(CONFIG_STLC45XX) += stlc45xx/ obj-$(CONFIG_B3DFG) += b3dfg/ obj-$(CONFIG_IDE_PHISON) += phison/ obj-$(CONFIG_PLAN9AUTH) += p9auth/ diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig index 24719499237..eb675635ae6 100644 --- a/drivers/staging/android/Kconfig +++ b/drivers/staging/android/Kconfig @@ -2,6 +2,7 @@ menu "Android" config ANDROID bool "Android Drivers" + depends on BROKEN default N ---help--- Enable support for various drivers needed on the Android platform diff --git a/drivers/staging/b3dfg/b3dfg.c b/drivers/staging/b3dfg/b3dfg.c index 94c5d27d24d..cda26bb493b 100644 --- a/drivers/staging/b3dfg/b3dfg.c +++ b/drivers/staging/b3dfg/b3dfg.c @@ -36,6 +36,7 @@ #include <linux/wait.h> #include <linux/mm.h> #include <linux/uaccess.h> +#include <linux/sched.h> static unsigned int b3dfg_nbuf = 2; diff --git a/drivers/staging/comedi/drivers/me_daq.c b/drivers/staging/comedi/drivers/me_daq.c index 2cda7ad1d32..80e192d2e77 100644 --- a/drivers/staging/comedi/drivers/me_daq.c +++ b/drivers/staging/comedi/drivers/me_daq.c @@ -51,6 +51,7 @@ from http://www.comedi.org */ #include <linux/interrupt.h> +#include <linux/sched.h> #include "../comedidev.h" #include "comedi_pci.h" diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c index e3ffb067ead..753ee051234 100644 --- a/drivers/staging/comedi/drivers/ni_mio_common.c +++ b/drivers/staging/comedi/drivers/ni_mio_common.c @@ -62,6 +62,7 @@ /* #define DEBUG_STATUS_B */ #include <linux/interrupt.h> +#include <linux/sched.h> #include "8255.h" #include "mite.h" #include "comedi_fc.h" diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c index 52b2eca9e73..d544698f241 100644 --- a/drivers/staging/comedi/drivers/ni_pcidio.c +++ b/drivers/staging/comedi/drivers/ni_pcidio.c @@ -70,6 +70,7 @@ comedi_nonfree_firmware tarball available from http://www.comedi.org /* #define DEBUG_FLAGS */ #include <linux/interrupt.h> +#include <linux/sched.h> #include "../comedidev.h" #include "mite.h" diff --git a/drivers/staging/et131x/et1310_address_map.h b/drivers/staging/et131x/et1310_address_map.h index 6294d3814e7..2c3d65a622a 100644 --- a/drivers/staging/et131x/et1310_address_map.h +++ b/drivers/staging/et131x/et1310_address_map.h @@ -223,7 +223,7 @@ typedef union _TXDMA_PR_NUM_DES_t { extern inline void add_10bit(u32 *v, 
int n) { - *v = INDEX10(*v + n); + *v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP); } /* diff --git a/drivers/staging/et131x/et1310_rx.c b/drivers/staging/et131x/et1310_rx.c index 8f2e91fa0a8..10e21db57ac 100644 --- a/drivers/staging/et131x/et1310_rx.c +++ b/drivers/staging/et131x/et1310_rx.c @@ -1177,12 +1177,20 @@ void et131x_handle_recv_interrupt(struct et131x_adapter *etdev) static inline u32 bump_fbr(u32 *fbr, u32 limit) { - u32 v = *fbr; - add_10bit(&v, 1); - if (v > limit) - v = (*fbr & ~ET_DMA10_MASK) ^ ET_DMA10_WRAP; - *fbr = v; - return v; + u32 v = *fbr; + v++; + /* This works for all cases where limit < 1024. The 1023 case + works because 1023++ is 1024 which means the if condition is not + taken but the carry of the bit into the wrap bit toggles the wrap + value correctly */ + if ((v & ET_DMA10_MASK) > limit) { + v &= ~ET_DMA10_MASK; + v ^= ET_DMA10_WRAP; + } + /* For the 1023 case */ + v &= (ET_DMA10_MASK|ET_DMA10_WRAP); + *fbr = v; + return v; } /** diff --git a/drivers/staging/hv/ChannelMgmt.h b/drivers/staging/hv/ChannelMgmt.h index a839d8fe6ce..fa973d86b62 100644 --- a/drivers/staging/hv/ChannelMgmt.h +++ b/drivers/staging/hv/ChannelMgmt.h @@ -26,6 +26,7 @@ #define _CHANNEL_MGMT_H_ #include <linux/list.h> +#include <linux/timer.h> #include "RingBuffer.h" #include "VmbusChannelInterface.h" #include "VmbusPacketFormat.h" @@ -54,7 +55,7 @@ enum vmbus_channel_message_type { ChannelMessageViewRangeRemove = 18, #endif ChannelMessageCount -} __attribute__((packed)); +}; struct vmbus_channel_message_header { enum vmbus_channel_message_type MessageType; diff --git a/drivers/staging/hv/NetVsc.c b/drivers/staging/hv/NetVsc.c index 1610b845198..d384c0ddf06 100644 --- a/drivers/staging/hv/NetVsc.c +++ b/drivers/staging/hv/NetVsc.c @@ -1052,7 +1052,7 @@ static void NetVscOnReceive(struct hv_device *Device, */ spin_lock_irqsave(&netDevice->receive_packet_list_lock, flags); while (!list_empty(&netDevice->ReceivePacketList)) { - list_move_tail(&netDevice->ReceivePacketList, &listHead); + list_move_tail(netDevice->ReceivePacketList.next, &listHead); if (++count == vmxferpagePacket->RangeCount + 1) break; } @@ -1071,7 +1071,7 @@ static void NetVscOnReceive(struct hv_device *Device, /* Return it to the freelist */ spin_lock_irqsave(&netDevice->receive_packet_list_lock, flags); for (i = count; i != 0; i--) { - list_move_tail(&listHead, + list_move_tail(listHead.next, &netDevice->ReceivePacketList); } spin_unlock_irqrestore(&netDevice->receive_packet_list_lock, @@ -1085,8 +1085,7 @@ static void NetVscOnReceive(struct hv_device *Device, } /* Remove the 1st packet to represent the xfer page packet itself */ - xferpagePacket = list_entry(&listHead, struct xferpage_packet, - ListEntry); + xferpagePacket = (struct xferpage_packet*)listHead.next; list_del(&xferpagePacket->ListEntry); /* This is how much we can satisfy */ @@ -1102,8 +1101,7 @@ static void NetVscOnReceive(struct hv_device *Device, /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */ for (i = 0; i < (count - 1); i++) { - netvscPacket = list_entry(&listHead, struct hv_netvsc_packet, - ListEntry); + netvscPacket = (struct hv_netvsc_packet*)listHead.next; list_del(&netvscPacket->ListEntry); /* Initialize the netvsc packet */ diff --git a/drivers/staging/hv/TODO b/drivers/staging/hv/TODO index 4d390b23774..dbfbde937a6 100644 --- a/drivers/staging/hv/TODO +++ b/drivers/staging/hv/TODO @@ -1,11 +1,17 @@ TODO: - fix remaining checkpatch warnings and errors + - use of /** when it is not a kerneldoc header - remove RingBuffer.c to 
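The et131x hunks above keep a free-buffer-ring index in the low 10 bits of a word with a wrap flag just above them; add_10bit now preserves that flag and bump_fbr toggles it when the index passes the ring limit. Below is a small userspace sketch of the same arithmetic; the mask values are assumptions modelled on the hunk, not taken from the header.

#include <stdio.h>
#include <stdint.h>

#define DMA10_MASK 0x3ff        /* low 10 bits: the index itself */
#define DMA10_WRAP 0x400        /* toggles every time the index wraps */

/* Advance a 10-bit index by one on a ring of 'limit + 1' entries
 * (limit < 1024), toggling the wrap bit on wrap-around. */
static uint32_t bump(uint32_t v, uint32_t limit)
{
        v++;
        if ((v & DMA10_MASK) > limit) {
                v &= ~DMA10_MASK;       /* back to index 0 ... */
                v ^= DMA10_WRAP;        /* ... on the other "lap" */
        }
        /* For limit == 1023 the increment itself carries into the wrap
         * bit, so just mask off anything above it. */
        return v & (DMA10_MASK | DMA10_WRAP);
}

int main(void)
{
        uint32_t v = 0x3fe;     /* index 1022, wrap clear */

        v = bump(v, 1023);      /* index 1023 */
        v = bump(v, 1023);      /* wraps: index 0, wrap bit now set */
        printf("index=%u wrap=%u\n", v & DMA10_MASK, !!(v & DMA10_WRAP));
        return 0;
}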
us in-kernel ringbuffer functions instead. - audit the vmbus to verify it is working properly with the driver model + - convert vmbus driver interface function pointer tables + to constant, a.k.a vmbus_ops - see if the vmbus can be merged with the other virtual busses in the kernel - audit the network driver + - use existing net_device_stats struct in network device + - checking for carrier inside open is wrong, network device API + confusion?? - audit the block driver - audit the scsi driver diff --git a/drivers/staging/hv/osd.c b/drivers/staging/hv/osd.c index 8fe543bd991..3a4793a0fd0 100644 --- a/drivers/staging/hv/osd.c +++ b/drivers/staging/hv/osd.c @@ -30,6 +30,7 @@ #include <linux/ioport.h> #include <linux/irq.h> #include <linux/interrupt.h> +#include <linux/sched.h> #include <linux/wait.h> #include <linux/spinlock.h> #include <linux/workqueue.h> diff --git a/drivers/staging/hv/osd.h b/drivers/staging/hv/osd.h index 9504604c72b..ce064e8ea64 100644 --- a/drivers/staging/hv/osd.h +++ b/drivers/staging/hv/osd.h @@ -25,6 +25,7 @@ #ifndef _OSD_H_ #define _OSD_H_ +#include <linux/workqueue.h> /* Defines */ #define ALIGN_UP(value, align) (((value) & (align-1)) ? \ diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c index 582318f1022..894eecfc63c 100644 --- a/drivers/staging/hv/vmbus_drv.c +++ b/drivers/staging/hv/vmbus_drv.c @@ -507,12 +507,12 @@ static struct hv_device *vmbus_child_device_create(struct hv_guid *type, child_device_obj = &child_device_ctx->device_obj; child_device_obj->context = context; - memcpy(&child_device_obj->deviceType, &type, sizeof(struct hv_guid)); - memcpy(&child_device_obj->deviceInstance, &instance, + memcpy(&child_device_obj->deviceType, type, sizeof(struct hv_guid)); + memcpy(&child_device_obj->deviceInstance, instance, sizeof(struct hv_guid)); - memcpy(&child_device_ctx->class_id, &type, sizeof(struct hv_guid)); - memcpy(&child_device_ctx->device_id, &instance, sizeof(struct hv_guid)); + memcpy(&child_device_ctx->class_id, type, sizeof(struct hv_guid)); + memcpy(&child_device_ctx->device_id, instance, sizeof(struct hv_guid)); DPRINT_EXIT(VMBUS_DRV); @@ -537,18 +537,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj, DPRINT_DBG(VMBUS_DRV, "child device (%p) registering", child_device_ctx); - /* Make sure we are not registered already */ - if (strlen(dev_name(&child_device_ctx->device)) != 0) { - DPRINT_ERR(VMBUS_DRV, - "child device (%p) already registered - busid %s", - child_device_ctx, - dev_name(&child_device_ctx->device)); - - ret = -1; - goto Cleanup; - } - - /* Set the device bus id. Otherwise, device_register()will fail. */ + /* Set the device name. Otherwise, device_register() will fail. 
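The vmbus_child_device_create hunk above fixes memcpy calls that copied from the address of a pointer parameter (&type) rather than from the structure it points to (type). The userspace sketch below shows the corrected pattern; the guid-like type and function names are illustrative only.

#include <stdio.h>
#include <string.h>

struct guid { unsigned char data[16]; };

struct child_device {
        struct guid device_type;
        struct guid device_instance;
};

static void fill_device_ids(struct child_device *dev,
                            const struct guid *type,
                            const struct guid *instance)
{
        /* Copy from 'type'/'instance', the structures themselves.  Writing
         * '&type' here would take the address of the local pointer variable
         * and copy the wrong 16 bytes. */
        memcpy(&dev->device_type, type, sizeof(struct guid));
        memcpy(&dev->device_instance, instance, sizeof(struct guid));
}

int main(void)
{
        struct guid type = { { 0xde, 0xad, 0xbe, 0xef } };
        struct guid instance = { { 0x01, 0x02, 0x03, 0x04 } };
        struct child_device dev;

        fill_device_ids(&dev, &type, &instance);
        printf("type copied intact: %s\n",
               memcmp(&dev.device_type, &type, sizeof(type)) == 0 ? "yes" : "no");
        return 0;
}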
*/ dev_set_name(&child_device_ctx->device, "vmbus_0_%d", atomic_inc_return(&device_num)); @@ -573,7 +562,6 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj, DPRINT_INFO(VMBUS_DRV, "child device (%p) registered", &child_device_ctx->device); -Cleanup: DPRINT_EXIT(VMBUS_DRV); return ret; @@ -623,8 +611,6 @@ static void vmbus_child_device_destroy(struct hv_device *device_obj) static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env) { struct device_context *device_ctx = device_to_device_context(device); - int i = 0; - int len = 0; int ret; DPRINT_ENTER(VMBUS_DRV); @@ -644,8 +630,6 @@ static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env) device_ctx->class_id.data[14], device_ctx->class_id.data[15]); - env->envp_idx = i; - env->buflen = len; ret = add_uevent_var(env, "VMBUS_DEVICE_CLASS_GUID={" "%02x%02x%02x%02x-%02x%02x-%02x%02x-" "%02x%02x%02x%02x%02x%02x%02x%02x}", @@ -691,8 +675,6 @@ static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env) if (ret) return ret; - env->envp[env->envp_idx] = NULL; - DPRINT_EXIT(VMBUS_DRV); return 0; diff --git a/drivers/staging/iio/industrialio-core.c b/drivers/staging/iio/industrialio-core.c index 1fa18f25581..768f44894d0 100644 --- a/drivers/staging/iio/industrialio-core.c +++ b/drivers/staging/iio/industrialio-core.c @@ -18,6 +18,8 @@ #include <linux/fs.h> #include <linux/interrupt.h> #include <linux/poll.h> +#include <linux/sched.h> +#include <linux/wait.h> #include <linux/cdev.h> #include "iio.h" #include "trigger_consumer.h" diff --git a/drivers/staging/otus/Kconfig b/drivers/staging/otus/Kconfig index d549d08fd49..f6cc2625e34 100644 --- a/drivers/staging/otus/Kconfig +++ b/drivers/staging/otus/Kconfig @@ -1,6 +1,6 @@ config OTUS tristate "Atheros OTUS 802.11n USB wireless support" - depends on USB && WLAN_80211 && MAC80211 + depends on USB && WLAN && MAC80211 default N ---help--- Enable support for Atheros 802.11n USB hardware: diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c index dd7d3fde969..4ce399b6d23 100644 --- a/drivers/staging/panel/panel.c +++ b/drivers/staging/panel/panel.c @@ -2071,11 +2071,15 @@ static void panel_detach(struct parport *port) return; } - if (keypad_enabled && keypad_initialized) + if (keypad_enabled && keypad_initialized) { misc_deregister(&keypad_dev); + keypad_initialized = 0; + } - if (lcd_enabled && lcd_initialized) + if (lcd_enabled && lcd_initialized) { misc_deregister(&lcd_dev); + lcd_initialized = 0; + } parport_release(pprt); parport_unregister_device(pprt); @@ -2211,13 +2215,16 @@ static void __exit panel_cleanup_module(void) del_timer(&scan_timer); if (pprt != NULL) { - if (keypad_enabled) + if (keypad_enabled) { misc_deregister(&keypad_dev); + keypad_initialized = 0; + } if (lcd_enabled) { panel_lcd_print("\x0cLCD driver " PANEL_VERSION "\nunloaded.\x1b[Lc\x1b[Lb\x1b[L-"); misc_deregister(&lcd_dev); + lcd_initialized = 0; } /* TODO: free all input signals */ diff --git a/drivers/staging/poch/poch.c b/drivers/staging/poch/poch.c index 0d111ddfabb..2eb8e3d43c4 100644 --- a/drivers/staging/poch/poch.c +++ b/drivers/staging/poch/poch.c @@ -20,6 +20,7 @@ #include <linux/init.h> #include <linux/ioctl.h> #include <linux/io.h> +#include <linux/sched.h> #include "poch.h" diff --git a/drivers/staging/rt2860/Kconfig b/drivers/staging/rt2860/Kconfig index 7f44e5e7246..efe38e25c5e 100644 --- a/drivers/staging/rt2860/Kconfig +++ b/drivers/staging/rt2860/Kconfig @@ -1,5 +1,5 @@ config RT2860 tristate "Ralink 2860 
wireless support" - depends on PCI && X86 && WLAN_80211 + depends on PCI && X86 && WLAN ---help--- This is an experimental driver for the Ralink 2860 wireless chip. diff --git a/drivers/staging/rt2860/common/cmm_data_2860.c b/drivers/staging/rt2860/common/cmm_data_2860.c index fb1735533b7..857ff450b6c 100644 --- a/drivers/staging/rt2860/common/cmm_data_2860.c +++ b/drivers/staging/rt2860/common/cmm_data_2860.c @@ -363,6 +363,8 @@ int RtmpPCIMgmtKickOut( ULONG SwIdx = pAd->MgmtRing.TxCpuIdx; pTxD = (PTXD_STRUC) pAd->MgmtRing.Cell[SwIdx].AllocVa; + if (!pTxD) + return 0; pAd->MgmtRing.Cell[SwIdx].pNdisPacket = pPacket; pAd->MgmtRing.Cell[SwIdx].pNextNdisPacket = NULL; diff --git a/drivers/staging/rt2860/common/cmm_info.c b/drivers/staging/rt2860/common/cmm_info.c index 9d589c240ed..019cc4474ce 100644 --- a/drivers/staging/rt2860/common/cmm_info.c +++ b/drivers/staging/rt2860/common/cmm_info.c @@ -25,6 +25,7 @@ ************************************************************************* */ +#include <linux/sched.h> #include "../rt_config.h" INT Show_SSID_Proc( diff --git a/drivers/staging/rt2860/rt_linux.c b/drivers/staging/rt2860/rt_linux.c index b396a9b570e..ed27b8545a1 100644 --- a/drivers/staging/rt2860/rt_linux.c +++ b/drivers/staging/rt2860/rt_linux.c @@ -25,6 +25,7 @@ ************************************************************************* */ +#include <linux/sched.h> #include "rt_config.h" ULONG RTDebugLevel = RT_DEBUG_ERROR; diff --git a/drivers/staging/rt2870/Kconfig b/drivers/staging/rt2870/Kconfig index 76841f6dea9..aea5c822181 100644 --- a/drivers/staging/rt2870/Kconfig +++ b/drivers/staging/rt2870/Kconfig @@ -1,5 +1,5 @@ config RT2870 tristate "Ralink 2870/3070 wireless support" - depends on USB && X86 && WLAN_80211 + depends on USB && X86 && WLAN ---help--- This is an experimental driver for the Ralink xx70 wireless chips. diff --git a/drivers/staging/rt3090/Kconfig b/drivers/staging/rt3090/Kconfig index 255e8eaa483..2b3f745d72b 100644 --- a/drivers/staging/rt3090/Kconfig +++ b/drivers/staging/rt3090/Kconfig @@ -1,5 +1,5 @@ config RT3090 tristate "Ralink 3090 wireless support" - depends on PCI && X86 && WLAN_80211 + depends on PCI && X86 && WLAN ---help--- This is an experimental driver for the Ralink 3090 wireless chip. 
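The RtmpPCIMgmtKickOut hunk above adds a guard that bails out when the management-ring descriptor pointer is NULL instead of dereferencing it unconditionally. A small userspace sketch of the same defensive check follows; the types and names are illustrative, not the driver's.

#include <stdio.h>
#include <stddef.h>

struct txd { unsigned int flags; };

struct ring_cell {
        struct txd *desc;       /* may be NULL if the ring was never set up */
        void *packet;
};

static int kick_out(struct ring_cell *cell, void *packet)
{
        struct txd *txd = cell->desc;

        if (!txd)
                return 0;       /* nothing mapped at this slot, drop quietly */

        cell->packet = packet;
        txd->flags |= 1;        /* hand the descriptor to the hardware */
        return 1;
}

int main(void)
{
        struct ring_cell empty = { NULL, NULL };
        struct txd d = { 0 };
        struct ring_cell live = { &d, NULL };
        int payload = 42;

        printf("empty slot: %d\n", kick_out(&empty, &payload));
        printf("live slot:  %d\n", kick_out(&live, &payload));
        return 0;
}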
diff --git a/drivers/staging/rt3090/common/cmm_info.c b/drivers/staging/rt3090/common/cmm_info.c index 5be0714666c..3e51e98b474 100644 --- a/drivers/staging/rt3090/common/cmm_info.c +++ b/drivers/staging/rt3090/common/cmm_info.c @@ -34,6 +34,7 @@ --------- ---------- ---------------------------------------------- */ +#include <linux/sched.h> #include "../rt_config.h" diff --git a/drivers/staging/rt3090/rt_linux.c b/drivers/staging/rt3090/rt_linux.c index d2241ecdf58..9b94aa6eb90 100644 --- a/drivers/staging/rt3090/rt_linux.c +++ b/drivers/staging/rt3090/rt_linux.c @@ -25,6 +25,7 @@ ************************************************************************* */ +#include <linux/sched.h> #include "rt_config.h" ULONG RTDebugLevel = RT_DEBUG_ERROR; diff --git a/drivers/staging/rtl8187se/Kconfig b/drivers/staging/rtl8187se/Kconfig index 236e4272544..203c79b8180 100644 --- a/drivers/staging/rtl8187se/Kconfig +++ b/drivers/staging/rtl8187se/Kconfig @@ -1,6 +1,6 @@ config RTL8187SE tristate "RealTek RTL8187SE Wireless LAN NIC driver" - depends on PCI + depends on PCI && WLAN depends on WIRELESS_EXT default N ---help--- diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c index 013c3e19ae2..4c5d63fd583 100644 --- a/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c +++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_crypt.c @@ -53,10 +53,8 @@ void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee, list_del(ptr); - if (entry->ops) { + if (entry->ops) entry->ops->deinit(entry->priv); - module_put(entry->ops->owner); - } kfree(entry); } } diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_module.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_module.c index 6fbe4890cb6..18392fce487 100644 --- a/drivers/staging/rtl8187se/ieee80211/ieee80211_module.c +++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_module.c @@ -189,10 +189,8 @@ void free_ieee80211(struct net_device *dev) for (i = 0; i < WEP_KEYS; i++) { struct ieee80211_crypt_data *crypt = ieee->crypt[i]; if (crypt) { - if (crypt->ops) { + if (crypt->ops) crypt->ops->deinit(crypt->priv); - module_put(crypt->ops->owner); - } kfree(crypt); ieee->crypt[i] = NULL; } diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c index 59b2ab48cdc..334e4c7ec61 100644 --- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c +++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c @@ -2839,16 +2839,12 @@ static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee, goto skip_host_crypt; ops = ieee80211_get_crypto_ops(param->u.crypt.alg); - if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) { - request_module("ieee80211_crypt_wep"); + if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) ops = ieee80211_get_crypto_ops(param->u.crypt.alg); - } else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) { - request_module("ieee80211_crypt_tkip"); + else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) ops = ieee80211_get_crypto_ops(param->u.crypt.alg); - } else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) { - request_module("ieee80211_crypt_ccmp"); + else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) ops = ieee80211_get_crypto_ops(param->u.crypt.alg); - } if (ops == NULL) { printk("unknown crypto alg '%s'\n", param->u.crypt.alg); param->u.crypt.err = IEEE_CRYPT_ERR_UNKNOWN_ALG; @@ -2869,7 +2865,7 @@ static int 
ieee80211_wpa_set_encryption(struct ieee80211_device *ieee, } memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data)); new_crypt->ops = ops; - if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) + if (new_crypt->ops) new_crypt->priv = new_crypt->ops->init(param->u.crypt.idx); diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c index 8d8bdd0a130..a08b97a0951 100644 --- a/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c +++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_wx.c @@ -331,12 +331,10 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee, return -ENOMEM; memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data)); new_crypt->ops = ieee80211_get_crypto_ops("WEP"); - if (!new_crypt->ops) { - request_module("ieee80211_crypt_wep"); + if (!new_crypt->ops) new_crypt->ops = ieee80211_get_crypto_ops("WEP"); - } - if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) + if (new_crypt->ops) new_crypt->priv = new_crypt->ops->init(key); if (!new_crypt->ops || !new_crypt->priv) { @@ -483,7 +481,7 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee, struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; int i, idx, ret = 0; int group_key = 0; - const char *alg, *module; + const char *alg; struct ieee80211_crypto_ops *ops; struct ieee80211_crypt_data **crypt; @@ -539,15 +537,12 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee, switch (ext->alg) { case IW_ENCODE_ALG_WEP: alg = "WEP"; - module = "ieee80211_crypt_wep"; break; case IW_ENCODE_ALG_TKIP: alg = "TKIP"; - module = "ieee80211_crypt_tkip"; break; case IW_ENCODE_ALG_CCMP: alg = "CCMP"; - module = "ieee80211_crypt_ccmp"; break; default: IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n", @@ -558,10 +553,8 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee, // printk("8-09-08-9=====>%s, alg name:%s\n",__func__, alg); ops = ieee80211_get_crypto_ops(alg); - if (ops == NULL) { - request_module(module); + if (ops == NULL) ops = ieee80211_get_crypto_ops(alg); - } if (ops == NULL) { IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n", dev->name, ext->alg); @@ -581,7 +574,7 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee, goto done; } new_crypt->ops = ops; - if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) + if (new_crypt->ops) new_crypt->priv = new_crypt->ops->init(idx); if (new_crypt->priv == NULL) { kfree(new_crypt); diff --git a/drivers/staging/rtl8192e/Kconfig b/drivers/staging/rtl8192e/Kconfig index 3100aa58c94..37e4fde4507 100644 --- a/drivers/staging/rtl8192e/Kconfig +++ b/drivers/staging/rtl8192e/Kconfig @@ -1,6 +1,6 @@ config RTL8192E tristate "RealTek RTL8192E Wireless LAN NIC driver" - depends on PCI + depends on PCI && WLAN depends on WIRELESS_EXT default N ---help--- diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.c index 1a8ea8a40c3..b1c54932da3 100644 --- a/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.c +++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_crypt.c @@ -53,14 +53,8 @@ void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee, list_del(ptr); - if (entry->ops) { + if (entry->ops) entry->ops->deinit(entry->priv); -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) - module_put(entry->ops->owner); -#else - __MOD_DEC_USE_COUNT(entry->ops->owner); -#endif - } kfree(entry); } } diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_module.c 
b/drivers/staging/rtl8192e/ieee80211/ieee80211_module.c index 16256a31f99..12c2a18e1fa 100644 --- a/drivers/staging/rtl8192e/ieee80211/ieee80211_module.c +++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_module.c @@ -242,14 +242,8 @@ void free_ieee80211(struct net_device *dev) for (i = 0; i < WEP_KEYS; i++) { struct ieee80211_crypt_data *crypt = ieee->crypt[i]; if (crypt) { - if (crypt->ops) { + if (crypt->ops) crypt->ops->deinit(crypt->priv); -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) - module_put(crypt->ops->owner); -#else - __MOD_DEC_USE_COUNT(crypt->ops->owner); -#endif - } kfree(crypt); ieee->crypt[i] = NULL; } diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c index 2fc04df872c..eae7c4579a6 100644 --- a/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c +++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_softmac.c @@ -3284,17 +3284,14 @@ static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee, goto skip_host_crypt; ops = ieee80211_get_crypto_ops(param->u.crypt.alg); - if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) { - request_module("ieee80211_crypt_wep"); + if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) ops = ieee80211_get_crypto_ops(param->u.crypt.alg); - //set WEP40 first, it will be modified according to WEP104 or WEP40 at other place - } else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) { - request_module("ieee80211_crypt_tkip"); + /* set WEP40 first, it will be modified according to WEP104 or + * WEP40 at other place */ + else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) ops = ieee80211_get_crypto_ops(param->u.crypt.alg); - } else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) { - request_module("ieee80211_crypt_ccmp"); + else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) ops = ieee80211_get_crypto_ops(param->u.crypt.alg); - } if (ops == NULL) { printk("unknown crypto alg '%s'\n", param->u.crypt.alg); param->u.crypt.err = IEEE_CRYPT_ERR_UNKNOWN_ALG; @@ -3315,11 +3312,7 @@ static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee, } memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data)); new_crypt->ops = ops; -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) - if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) -#else - if (new_crypt->ops && try_inc_mod_count(new_crypt->ops->owner)) -#endif + if (new_crypt->ops) new_crypt->priv = new_crypt->ops->init(param->u.crypt.idx); diff --git a/drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c index 223483126b0..4e34a1f4c66 100644 --- a/drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c +++ b/drivers/staging/rtl8192e/ieee80211/ieee80211_wx.c @@ -482,15 +482,9 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee, return -ENOMEM; memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data)); new_crypt->ops = ieee80211_get_crypto_ops("WEP"); - if (!new_crypt->ops) { - request_module("ieee80211_crypt_wep"); + if (!new_crypt->ops) new_crypt->ops = ieee80211_get_crypto_ops("WEP"); - } -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) - if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) -#else - if (new_crypt->ops && try_inc_mod_count(new_crypt->ops->owner)) -#endif + if (new_crypt->ops) new_crypt->priv = new_crypt->ops->init(key); if (!new_crypt->ops || !new_crypt->priv) { @@ -644,7 +638,7 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee, struct iw_encode_ext *ext = 
(struct iw_encode_ext *)extra; int i, idx; int group_key = 0; - const char *alg, *module; + const char *alg; struct ieee80211_crypto_ops *ops; struct ieee80211_crypt_data **crypt; @@ -711,15 +705,12 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee, switch (ext->alg) { case IW_ENCODE_ALG_WEP: alg = "WEP"; - module = "ieee80211_crypt_wep"; break; case IW_ENCODE_ALG_TKIP: alg = "TKIP"; - module = "ieee80211_crypt_tkip"; break; case IW_ENCODE_ALG_CCMP: alg = "CCMP"; - module = "ieee80211_crypt_ccmp"; break; default: IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n", @@ -730,10 +721,8 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee, printk("alg name:%s\n",alg); ops = ieee80211_get_crypto_ops(alg); - if (ops == NULL) { - request_module(module); + if (ops == NULL) ops = ieee80211_get_crypto_ops(alg); - } if (ops == NULL) { IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n", dev->name, ext->alg); @@ -758,7 +747,7 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee, goto done; } new_crypt->ops = ops; - if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) + if (new_crypt->ops) new_crypt->priv = new_crypt->ops->init(idx); if (new_crypt->priv == NULL) { kfree(new_crypt); diff --git a/drivers/staging/rtl8192su/Kconfig b/drivers/staging/rtl8192su/Kconfig index 770f41280f2..b8c95f94206 100644 --- a/drivers/staging/rtl8192su/Kconfig +++ b/drivers/staging/rtl8192su/Kconfig @@ -1,6 +1,6 @@ config RTL8192SU tristate "RealTek RTL8192SU Wireless LAN NIC driver" - depends on PCI + depends on PCI && WLAN depends on WIRELESS_EXT default N ---help--- diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt.c index d76a54d59d2..521e7b98993 100644 --- a/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt.c +++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_crypt.c @@ -53,10 +53,8 @@ void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee, list_del(ptr); - if (entry->ops) { + if (entry->ops) entry->ops->deinit(entry->priv); - module_put(entry->ops->owner); - } kfree(entry); } } diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_module.c index 68dc8fa094c..c3383bb8b76 100644 --- a/drivers/staging/rtl8192su/ieee80211/ieee80211_module.c +++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_module.c @@ -216,10 +216,8 @@ void free_ieee80211(struct net_device *dev) for (i = 0; i < WEP_KEYS; i++) { struct ieee80211_crypt_data *crypt = ieee->crypt[i]; if (crypt) { - if (crypt->ops) { + if (crypt->ops) crypt->ops->deinit(crypt->priv); - module_put(crypt->ops->owner); - } kfree(crypt); ieee->crypt[i] = NULL; } diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c index c64ae03f68a..fd8e11252f1 100644 --- a/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c +++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c @@ -3026,17 +3026,14 @@ static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee, goto skip_host_crypt; ops = ieee80211_get_crypto_ops(param->u.crypt.alg); - if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) { - request_module("ieee80211_crypt_wep"); + if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) ops = ieee80211_get_crypto_ops(param->u.crypt.alg); - //set WEP40 first, it will be modified according to WEP104 or WEP40 at other place - } else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) { - 
request_module("ieee80211_crypt_tkip"); + /* set WEP40 first, it will be modified according to WEP104 or + * WEP40 at other place */ + else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) ops = ieee80211_get_crypto_ops(param->u.crypt.alg); - } else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) { - request_module("ieee80211_crypt_ccmp"); + else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) ops = ieee80211_get_crypto_ops(param->u.crypt.alg); - } if (ops == NULL) { printk("unknown crypto alg '%s'\n", param->u.crypt.alg); param->u.crypt.err = IEEE_CRYPT_ERR_UNKNOWN_ALG; @@ -3058,7 +3055,7 @@ static int ieee80211_wpa_set_encryption(struct ieee80211_device *ieee, memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data)); new_crypt->ops = ops; - if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) + if (new_crypt->ops) new_crypt->priv = new_crypt->ops->init(param->u.crypt.idx); diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_wx.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_wx.c index 10775902433..6146c6435dd 100644 --- a/drivers/staging/rtl8192su/ieee80211/ieee80211_wx.c +++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_wx.c @@ -358,11 +358,9 @@ int ieee80211_wx_set_encode(struct ieee80211_device *ieee, return -ENOMEM; memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data)); new_crypt->ops = ieee80211_get_crypto_ops("WEP"); - if (!new_crypt->ops) { - request_module("ieee80211_crypt_wep"); + if (!new_crypt->ops) new_crypt->ops = ieee80211_get_crypto_ops("WEP"); - } - if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) + if (new_crypt->ops) new_crypt->priv = new_crypt->ops->init(key); if (!new_crypt->ops || !new_crypt->priv) { @@ -507,7 +505,7 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee, struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; int i, idx; int group_key = 0; - const char *alg, *module; + const char *alg; struct ieee80211_crypto_ops *ops; struct ieee80211_crypt_data **crypt; @@ -570,15 +568,12 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee, switch (ext->alg) { case IW_ENCODE_ALG_WEP: alg = "WEP"; - module = "ieee80211_crypt_wep"; break; case IW_ENCODE_ALG_TKIP: alg = "TKIP"; - module = "ieee80211_crypt_tkip"; break; case IW_ENCODE_ALG_CCMP: alg = "CCMP"; - module = "ieee80211_crypt_ccmp"; break; default: IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n", @@ -589,10 +584,8 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee, printk("alg name:%s\n",alg); ops = ieee80211_get_crypto_ops(alg); - if (ops == NULL) { - request_module("%s", module); + if (ops == NULL) ops = ieee80211_get_crypto_ops(alg); - } if (ops == NULL) { IEEE80211_DEBUG_WX("%s: unknown crypto alg %d\n", dev->name, ext->alg); @@ -612,7 +605,7 @@ int ieee80211_wx_set_encode_ext(struct ieee80211_device *ieee, goto done; } new_crypt->ops = ops; - if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) + if (new_crypt->ops) new_crypt->priv = new_crypt->ops->init(idx); if (new_crypt->priv == NULL) { kfree(new_crypt); diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c index 87f8a119276..f890a16096c 100644 --- a/drivers/staging/sep/sep_driver.c +++ b/drivers/staging/sep/sep_driver.c @@ -38,6 +38,7 @@ #include <linux/mm.h> #include <linux/poll.h> #include <linux/wait.h> +#include <linux/sched.h> #include <linux/pci.h> #include <linux/firmware.h> #include <asm/ioctl.h> diff --git a/drivers/staging/stlc45xx/Kconfig b/drivers/staging/stlc45xx/Kconfig deleted file mode 
100644 index 947fb75a9c6..00000000000 --- a/drivers/staging/stlc45xx/Kconfig +++ /dev/null @@ -1,8 +0,0 @@ -config STLC45XX - tristate "stlc4550/4560 support" - depends on MAC80211 && WLAN_80211 && SPI_MASTER && GENERIC_HARDIRQS - ---help--- - This is a driver for stlc4550 and stlc4560 chipsets. - - To compile this driver as a module, choose M here: the module will be - called stlc45xx. If unsure, say N. diff --git a/drivers/staging/stlc45xx/Makefile b/drivers/staging/stlc45xx/Makefile deleted file mode 100644 index 7ee32903055..00000000000 --- a/drivers/staging/stlc45xx/Makefile +++ /dev/null @@ -1 +0,0 @@ -obj-$(CONFIG_STLC45XX) += stlc45xx.o diff --git a/drivers/staging/stlc45xx/stlc45xx.c b/drivers/staging/stlc45xx/stlc45xx.c deleted file mode 100644 index be99eb33d81..00000000000 --- a/drivers/staging/stlc45xx/stlc45xx.c +++ /dev/null @@ -1,2594 +0,0 @@ -/* - * This file is part of stlc45xx - * - * Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies). - * - * Contact: Kalle Valo <kalle.valo@nokia.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA - * - */ - -#include "stlc45xx.h" - -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/interrupt.h> -#include <linux/firmware.h> -#include <linux/delay.h> -#include <linux/irq.h> -#include <linux/spi/spi.h> -#include <linux/etherdevice.h> -#include <linux/gpio.h> -#include <linux/moduleparam.h> - -#include "stlc45xx_lmac.h" - -/* - * gpios should be handled in board files and provided via platform data, - * but because it's currently impossible for stlc45xx to have a header file - * in include/linux, let's use module paramaters for now - */ -static int stlc45xx_gpio_power = 97; -module_param(stlc45xx_gpio_power, int, 0444); -MODULE_PARM_DESC(stlc45xx_gpio_power, "stlc45xx gpio number for power line"); - -static int stlc45xx_gpio_irq = 87; -module_param(stlc45xx_gpio_irq, int, 0444); -MODULE_PARM_DESC(stlc45xx_gpio_irq, "stlc45xx gpio number for irq line"); - -static const u8 default_cal_channels[] = { - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c, 0x09, - 0x00, 0x00, 0xc9, 0xff, 0xd8, 0xff, 0x00, 0x00, 0x00, 0x01, 0x10, - 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0xe0, 0x00, 0xe0, 0x00, - 0xe0, 0x00, 0xe0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, - 0x00, 0x54, 0x01, 0xab, 0xf6, 0xc0, 0x42, 0xc0, 0x42, 0xc0, 0x42, - 0xc0, 0x42, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, - 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x22, 0x01, 0x37, 0xa9, - 0xc0, 0x33, 0xc0, 0x33, 0xc0, 0x33, 0xc0, 0x33, 0x00, 0xbc, 0x00, - 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, - 0x00, 0xbc, 0xfb, 0x00, 0xca, 0x79, 0xc0, 0x2b, 0xc0, 0x2b, 0xc0, - 0x2b, 0xc0, 0x2b, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, - 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0xd0, 0x00, 0x5d, - 0x54, 0xc0, 0x21, 0xc0, 
0x21, 0xc0, 0x21, 0xc0, 0x21, 0x00, 0xaa, - 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, - 0xaa, 0x00, 0xaa, 0xa7, 0x00, 0xa9, 0x3d, 0xc0, 0x17, 0xc0, 0x17, - 0xc0, 0x17, 0xc0, 0x17, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, - 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x7a, 0x00, - 0x06, 0x2c, 0xc0, 0x0d, 0xc0, 0x0d, 0xc0, 0x0d, 0xc0, 0x0d, 0x00, - 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, - 0x00, 0x96, 0x00, 0x96, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x06, 0x80, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x71, 0x09, 0x00, 0x00, 0xc9, 0xff, 0xd8, - 0xff, 0x00, 0x00, 0x00, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, - 0x10, 0x01, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xd0, - 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0x54, 0x01, 0xab, 0xf6, - 0xc0, 0x42, 0xc0, 0x42, 0xc0, 0x42, 0xc0, 0x42, 0x00, 0xcb, 0x00, - 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, - 0x00, 0xcb, 0x22, 0x01, 0x37, 0xa9, 0xc0, 0x33, 0xc0, 0x33, 0xc0, - 0x33, 0xc0, 0x33, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, - 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0xfb, 0x00, 0xca, - 0x79, 0xc0, 0x2b, 0xc0, 0x2b, 0xc0, 0x2b, 0xc0, 0x2b, 0x00, 0xb4, - 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, - 0xb4, 0x00, 0xb4, 0xd0, 0x00, 0x5d, 0x54, 0xc0, 0x21, 0xc0, 0x21, - 0xc0, 0x21, 0xc0, 0x21, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, - 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0xa7, 0x00, - 0xa9, 0x3d, 0xc0, 0x17, 0xc0, 0x17, 0xc0, 0x17, 0xc0, 0x17, 0x00, - 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, - 0x00, 0xa0, 0x00, 0xa0, 0x7a, 0x00, 0x06, 0x2c, 0xc0, 0x0d, 0xc0, - 0x0d, 0xc0, 0x0d, 0xc0, 0x0d, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, - 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, - 0x09, 0x00, 0x00, 0xc9, 0xff, 0xd8, 0xff, 0x00, 0x00, 0x00, 0x01, - 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0xf0, 0x00, 0xf0, - 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, - 0xd0, 0x00, 0x54, 0x01, 0xab, 0xf6, 0xc0, 0x42, 0xc0, 0x42, 0xc0, - 0x42, 0xc0, 0x42, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, - 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x22, 0x01, 0x37, - 0xa9, 0xc0, 0x33, 0xc0, 0x33, 0xc0, 0x33, 0xc0, 0x33, 0x00, 0xbc, - 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, - 0xbc, 0x00, 0xbc, 0xfb, 0x00, 0xca, 0x79, 0xc0, 0x2b, 0xc0, 0x2b, - 0xc0, 0x2b, 0xc0, 0x2b, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, - 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 
0x00, 0xb4, 0xd0, 0x00, - 0x5d, 0x54, 0xc0, 0x21, 0xc0, 0x21, 0xc0, 0x21, 0xc0, 0x21, 0x00, - 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, - 0x00, 0xaa, 0x00, 0xaa, 0xa7, 0x00, 0xa9, 0x3d, 0xc0, 0x17, 0xc0, - 0x17, 0xc0, 0x17, 0xc0, 0x17, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, - 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x7a, - 0x00, 0x06, 0x2c, 0xc0, 0x0d, 0xc0, 0x0d, 0xc0, 0x0d, 0xc0, 0x0d, - 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, - 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, 0x09, 0x00, 0x00, 0xc9, 0xff, - 0xd8, 0xff, 0x00, 0x00, 0x00, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, - 0x01, 0x10, 0x01, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, - 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0x54, 0x01, 0xab, - 0xf6, 0xc0, 0x42, 0xc0, 0x42, 0xc0, 0x42, 0xc0, 0x42, 0x00, 0xcb, - 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, - 0xcb, 0x00, 0xcb, 0x22, 0x01, 0x37, 0xa9, 0xc0, 0x33, 0xc0, 0x33, - 0xc0, 0x33, 0xc0, 0x33, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, - 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0xfb, 0x00, - 0xca, 0x79, 0xc0, 0x2b, 0xc0, 0x2b, 0xc0, 0x2b, 0xc0, 0x2b, 0x00, - 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, - 0x00, 0xb4, 0x00, 0xb4, 0xd0, 0x00, 0x5d, 0x54, 0xc0, 0x21, 0xc0, - 0x21, 0xc0, 0x21, 0xc0, 0x21, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, - 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0xa7, - 0x00, 0xa9, 0x3d, 0xc0, 0x17, 0xc0, 0x17, 0xc0, 0x17, 0xc0, 0x17, - 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, - 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x7a, 0x00, 0x06, 0x2c, 0xc0, 0x0d, - 0xc0, 0x0d, 0xc0, 0x0d, 0xc0, 0x0d, 0x00, 0x96, 0x00, 0x96, 0x00, - 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, - 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x80, 0x09, 0x00, 0x00, 0xc9, 0xff, 0xd8, 0xff, 0x00, 0x00, 0x00, - 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0xf0, 0x00, - 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, - 0x00, 0xd0, 0x00, 0x54, 0x01, 0xab, 0xf6, 0xc0, 0x42, 0xc0, 0x42, - 0xc0, 0x42, 0xc0, 0x42, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, - 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x22, 0x01, - 0x37, 0xa9, 0xc0, 0x33, 0xc0, 0x33, 0xc0, 0x33, 0xc0, 0x33, 0x00, - 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, - 0x00, 0xbc, 0x00, 0xbc, 0xfb, 0x00, 0xca, 0x79, 0xc0, 0x2b, 0xc0, - 0x2b, 0xc0, 0x2b, 0xc0, 0x2b, 0x00, 0xb4, 0x00, 0xb4, 0x00, 
0xb4, - 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0xd0, - 0x00, 0x5d, 0x54, 0xc0, 0x21, 0xc0, 0x21, 0xc0, 0x21, 0xc0, 0x21, - 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, - 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0xa7, 0x00, 0xa9, 0x3d, 0xc0, 0x17, - 0xc0, 0x17, 0xc0, 0x17, 0xc0, 0x17, 0x00, 0xa0, 0x00, 0xa0, 0x00, - 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, - 0x7a, 0x00, 0x06, 0x2c, 0xc0, 0x0d, 0xc0, 0x0d, 0xc0, 0x0d, 0xc0, - 0x0d, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, - 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x09, 0x00, 0x00, 0xc9, - 0xff, 0xd8, 0xff, 0x00, 0x00, 0x00, 0x01, 0x10, 0x01, 0x10, 0x01, - 0x10, 0x01, 0x10, 0x01, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, - 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0x54, 0x01, - 0xab, 0xf6, 0xc0, 0x42, 0xc0, 0x42, 0xc0, 0x42, 0xc0, 0x42, 0x00, - 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, - 0x00, 0xcb, 0x00, 0xcb, 0x22, 0x01, 0x37, 0xa9, 0xc0, 0x33, 0xc0, - 0x33, 0xc0, 0x33, 0xc0, 0x33, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, - 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0xfb, - 0x00, 0xca, 0x79, 0xc0, 0x2b, 0xc0, 0x2b, 0xc0, 0x2b, 0xc0, 0x2b, - 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, - 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0xd0, 0x00, 0x5d, 0x54, 0xc0, 0x21, - 0xc0, 0x21, 0xc0, 0x21, 0xc0, 0x21, 0x00, 0xaa, 0x00, 0xaa, 0x00, - 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, - 0xa7, 0x00, 0xa9, 0x3d, 0xc0, 0x17, 0xc0, 0x17, 0xc0, 0x17, 0xc0, - 0x17, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, - 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x7a, 0x00, 0x06, 0x2c, 0xc0, - 0x0d, 0xc0, 0x0d, 0xc0, 0x0d, 0xc0, 0x0d, 0x00, 0x96, 0x00, 0x96, - 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, - 0x96, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, - 0x80, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x8a, 0x09, 0x00, 0x00, 0xc9, 0xff, 0xd8, 0xff, 0x00, 0x00, - 0x00, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0xf0, - 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xd0, 0x00, 0xd0, 0x00, - 0xd0, 0x00, 0xd0, 0x00, 0x54, 0x01, 0xab, 0xf6, 0xc0, 0x42, 0xc0, - 0x42, 0xc0, 0x42, 0xc0, 0x42, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, - 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x22, - 0x01, 0x37, 0xa9, 0xc0, 0x33, 0xc0, 0x33, 0xc0, 0x33, 0xc0, 0x33, - 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, - 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0xfb, 0x00, 0xca, 0x79, 0xc0, 0x2b, - 0xc0, 
0x2b, 0xc0, 0x2b, 0xc0, 0x2b, 0x00, 0xb4, 0x00, 0xb4, 0x00, - 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, - 0xd0, 0x00, 0x5d, 0x54, 0xc0, 0x21, 0xc0, 0x21, 0xc0, 0x21, 0xc0, - 0x21, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, - 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0xa7, 0x00, 0xa9, 0x3d, 0xc0, - 0x17, 0xc0, 0x17, 0xc0, 0x17, 0xc0, 0x17, 0x00, 0xa0, 0x00, 0xa0, - 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, - 0xa0, 0x7a, 0x00, 0x06, 0x2c, 0xc0, 0x0d, 0xc0, 0x0d, 0xc0, 0x0d, - 0xc0, 0x0d, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, - 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x09, 0x00, 0x00, - 0xc9, 0xff, 0xd8, 0xff, 0x00, 0x00, 0x00, 0x01, 0x10, 0x01, 0x10, - 0x01, 0x10, 0x01, 0x10, 0x01, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, - 0xf0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0x54, - 0x01, 0xab, 0xf6, 0xc0, 0x42, 0xc0, 0x42, 0xc0, 0x42, 0xc0, 0x42, - 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, - 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x22, 0x01, 0x37, 0xa9, 0xc0, 0x33, - 0xc0, 0x33, 0xc0, 0x33, 0xc0, 0x33, 0x00, 0xbc, 0x00, 0xbc, 0x00, - 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, - 0xfb, 0x00, 0xca, 0x79, 0xc0, 0x2b, 0xc0, 0x2b, 0xc0, 0x2b, 0xc0, - 0x2b, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, - 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0xd0, 0x00, 0x5d, 0x54, 0xc0, - 0x21, 0xc0, 0x21, 0xc0, 0x21, 0xc0, 0x21, 0x00, 0xaa, 0x00, 0xaa, - 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, - 0xaa, 0xa7, 0x00, 0xa9, 0x3d, 0xc0, 0x17, 0xc0, 0x17, 0xc0, 0x17, - 0xc0, 0x17, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, - 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x7a, 0x00, 0x06, 0x2c, - 0xc0, 0x0d, 0xc0, 0x0d, 0xc0, 0x0d, 0xc0, 0x0d, 0x00, 0x96, 0x00, - 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, - 0x00, 0x96, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x06, 0x80, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x94, 0x09, 0x00, 0x00, 0xc9, 0xff, 0xd8, 0xff, 0x00, - 0x00, 0x00, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, - 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xd0, 0x00, 0xd0, - 0x00, 0xd0, 0x00, 0xd0, 0x00, 0x54, 0x01, 0xab, 0xf6, 0xc0, 0x42, - 0xc0, 0x42, 0xc0, 0x42, 0xc0, 0x42, 0x00, 0xcb, 0x00, 0xcb, 0x00, - 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, - 0x22, 0x01, 0x37, 0xa9, 0xc0, 0x33, 0xc0, 0x33, 0xc0, 0x33, 0xc0, - 0x33, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, - 0x00, 0xbc, 0x00, 0xbc, 
0x00, 0xbc, 0xfb, 0x00, 0xca, 0x79, 0xc0, - 0x2b, 0xc0, 0x2b, 0xc0, 0x2b, 0xc0, 0x2b, 0x00, 0xb4, 0x00, 0xb4, - 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, - 0xb4, 0xd0, 0x00, 0x5d, 0x54, 0xc0, 0x21, 0xc0, 0x21, 0xc0, 0x21, - 0xc0, 0x21, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, - 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0xa7, 0x00, 0xa9, 0x3d, - 0xc0, 0x17, 0xc0, 0x17, 0xc0, 0x17, 0xc0, 0x17, 0x00, 0xa0, 0x00, - 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, - 0x00, 0xa0, 0x7a, 0x00, 0x06, 0x2c, 0xc0, 0x0d, 0xc0, 0x0d, 0xc0, - 0x0d, 0xc0, 0x0d, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, - 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x99, 0x09, 0x00, - 0x00, 0xc9, 0xff, 0xd8, 0xff, 0x00, 0x00, 0x00, 0x01, 0x10, 0x01, - 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0xf0, 0x00, 0xf0, 0x00, 0xf0, - 0x00, 0xf0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, - 0x54, 0x01, 0xab, 0xf6, 0xc0, 0x42, 0xc0, 0x42, 0xc0, 0x42, 0xc0, - 0x42, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, - 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x22, 0x01, 0x37, 0xa9, 0xc0, - 0x33, 0xc0, 0x33, 0xc0, 0x33, 0xc0, 0x33, 0x00, 0xbc, 0x00, 0xbc, - 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, - 0xbc, 0xfb, 0x00, 0xca, 0x79, 0xc0, 0x2b, 0xc0, 0x2b, 0xc0, 0x2b, - 0xc0, 0x2b, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, - 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0xd0, 0x00, 0x5d, 0x54, - 0xc0, 0x21, 0xc0, 0x21, 0xc0, 0x21, 0xc0, 0x21, 0x00, 0xaa, 0x00, - 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, - 0x00, 0xaa, 0xa7, 0x00, 0xa9, 0x3d, 0xc0, 0x17, 0xc0, 0x17, 0xc0, - 0x17, 0xc0, 0x17, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, - 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x7a, 0x00, 0x06, - 0x2c, 0xc0, 0x0d, 0xc0, 0x0d, 0xc0, 0x0d, 0xc0, 0x0d, 0x00, 0x96, - 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, - 0x96, 0x00, 0x96, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x06, 0x80, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x9e, 0x09, 0x00, 0x00, 0xc9, 0xff, 0xd8, 0xff, - 0x00, 0x00, 0x00, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, 0x01, 0x10, - 0x01, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xf0, 0x00, 0xd0, 0x00, - 0xd0, 0x00, 0xd0, 0x00, 0xd0, 0x00, 0x54, 0x01, 0xab, 0xf6, 0xc0, - 0x42, 0xc0, 0x42, 0xc0, 0x42, 0xc0, 0x42, 0x00, 0xcb, 0x00, 0xcb, - 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, 0xcb, 0x00, - 0xcb, 0x22, 0x01, 0x37, 0xa9, 0xc0, 0x33, 0xc0, 0x33, 0xc0, 0x33, - 0xc0, 0x33, 0x00, 0xbc, 0x00, 0xbc, 0x00, 
0xbc, 0x00, 0xbc, 0x00, - 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0x00, 0xbc, 0xfb, 0x00, 0xca, 0x79, - 0xc0, 0x2b, 0xc0, 0x2b, 0xc0, 0x2b, 0xc0, 0x2b, 0x00, 0xb4, 0x00, - 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, 0x00, 0xb4, - 0x00, 0xb4, 0xd0, 0x00, 0x5d, 0x54, 0xc0, 0x21, 0xc0, 0x21, 0xc0, - 0x21, 0xc0, 0x21, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, - 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0x00, 0xaa, 0xa7, 0x00, 0xa9, - 0x3d, 0xc0, 0x17, 0xc0, 0x17, 0xc0, 0x17, 0xc0, 0x17, 0x00, 0xa0, - 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, 0xa0, 0x00, - 0xa0, 0x00, 0xa0, 0x7a, 0x00, 0x06, 0x2c, 0xc0, 0x0d, 0xc0, 0x0d, - 0xc0, 0x0d, 0xc0, 0x0d, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, - 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x96, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x80, 0x80, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00 }; - -static const u8 default_cal_rssi[] = { - 0x0a, 0x01, 0x72, 0xfe, 0x1a, 0x00, 0x00, 0x00, 0x0a, 0x01, 0x72, - 0xfe, 0x1a, 0x00, 0x00, 0x00, 0x0a, 0x01, 0x72, 0xfe, 0x1a, 0x00, - 0x00, 0x00, 0x0a, 0x01, 0x72, 0xfe, 0x1a, 0x00, 0x00, 0x00, 0x0a, - 0x01, 0x72, 0xfe, 0x1a, 0x00, 0x00, 0x00, 0x0a, 0x01, 0x72, 0xfe, - 0x1a, 0x00, 0x00, 0x00, 0x0a, 0x01, 0x72, 0xfe, 0x1a, 0x00, 0x00, - 0x00, 0x0a, 0x01, 0x72, 0xfe, 0x1a, 0x00, 0x00, 0x00, 0x0a, 0x01, - 0x72, 0xfe, 0x1a, 0x00, 0x00, 0x00, 0x0a, 0x01, 0x72, 0xfe, 0x1a, - 0x00, 0x00, 0x00, 0x0a, 0x01, 0x72, 0xfe, 0x1a, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00 }; - -static void stlc45xx_tx_edcf(struct stlc45xx *stlc); -static void stlc45xx_tx_setup(struct stlc45xx *stlc); -static void stlc45xx_tx_scan(struct stlc45xx *stlc); -static void stlc45xx_tx_psm(struct stlc45xx *stlc, bool enable); -static int stlc45xx_tx_nullfunc(struct stlc45xx *stlc, bool powersave); -static int stlc45xx_tx_pspoll(struct stlc45xx *stlc, bool powersave); - -static ssize_t stlc45xx_sysfs_show_cal_rssi(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct stlc45xx *stlc = dev_get_drvdata(dev); - ssize_t len; - - stlc45xx_debug(DEBUG_FUNC, "%s", __func__); - - len = PAGE_SIZE; - - mutex_lock(&stlc->mutex); - - if (stlc->cal_rssi) - hex_dump_to_buffer(stlc->cal_rssi, RSSI_CAL_ARRAY_LEN, 16, - 2, buf, len, 0); - mutex_unlock(&stlc->mutex); - - len = strlen(buf); - - return len; -} - -static ssize_t stlc45xx_sysfs_store_cal_rssi(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct stlc45xx *stlc = dev_get_drvdata(dev); - - stlc45xx_debug(DEBUG_FUNC, "%s", __func__); - - mutex_lock(&stlc->mutex); - - if (count != RSSI_CAL_ARRAY_LEN) { - stlc45xx_error("invalid cal_rssi length: %zu", count); - count = 0; - goto out_unlock; - } - - kfree(stlc->cal_rssi); - - stlc->cal_rssi = kmemdup(buf, RSSI_CAL_ARRAY_LEN, GFP_KERNEL); - - if (!stlc->cal_rssi) { - stlc45xx_error("failed to allocate memory for cal_rssi"); - count = 0; - goto out_unlock; - } - - out_unlock: - mutex_unlock(&stlc->mutex); - - return count; -} - -static ssize_t stlc45xx_sysfs_show_cal_channels(struct device *dev, - struct device_attribute *attr, - char *buf) 
-{ - struct stlc45xx *stlc = dev_get_drvdata(dev); - ssize_t len; - - stlc45xx_debug(DEBUG_FUNC, "%s", __func__); - - len = PAGE_SIZE; - - mutex_lock(&stlc->mutex); - - if (stlc->cal_channels) - hex_dump_to_buffer(stlc->cal_channels, CHANNEL_CAL_ARRAY_LEN, - 16, 2, buf, len, 0); - - mutex_unlock(&stlc->mutex); - - len = strlen(buf); - - return len; -} - -static ssize_t stlc45xx_sysfs_store_cal_channels(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct stlc45xx *stlc = dev_get_drvdata(dev); - - stlc45xx_debug(DEBUG_FUNC, "%s", __func__); - - mutex_lock(&stlc->mutex); - - if (count != CHANNEL_CAL_ARRAY_LEN) { - stlc45xx_error("invalid cal_channels size: %zu ", count); - count = 0; - goto out_unlock; - } - - kfree(stlc->cal_channels); - - stlc->cal_channels = kmemdup(buf, count, GFP_KERNEL); - - if (!stlc->cal_channels) { - stlc45xx_error("failed to allocate memory for cal_channels"); - count = 0; - goto out_unlock; - } - -out_unlock: - mutex_unlock(&stlc->mutex); - - return count; -} - -static ssize_t stlc45xx_sysfs_show_tx_buf(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct stlc45xx *stlc = dev_get_drvdata(dev); - struct txbuffer *entry; - ssize_t len = 0; - - stlc45xx_debug(DEBUG_FUNC, "%s()", __func__); - - mutex_lock(&stlc->mutex); - - list_for_each_entry(entry, &stlc->tx_sent, tx_list) { - len += sprintf(buf + len, "0x%x: 0x%x-0x%x\n", - entry->handle, entry->start, - entry->end); - } - - mutex_unlock(&stlc->mutex); - - return len; -} - -static DEVICE_ATTR(cal_rssi, S_IRUGO | S_IWUSR, - stlc45xx_sysfs_show_cal_rssi, - stlc45xx_sysfs_store_cal_rssi); -static DEVICE_ATTR(cal_channels, S_IRUGO | S_IWUSR, - stlc45xx_sysfs_show_cal_channels, - stlc45xx_sysfs_store_cal_channels); -static DEVICE_ATTR(tx_buf, S_IRUGO, stlc45xx_sysfs_show_tx_buf, NULL); - -static void stlc45xx_spi_read(struct stlc45xx *stlc, unsigned long addr, - void *buf, size_t len) -{ - struct spi_transfer t[2]; - struct spi_message m; - - /* We first push the address */ - addr = (addr << 8) | ADDR_READ_BIT_15; - - spi_message_init(&m); - memset(t, 0, sizeof(t)); - - t[0].tx_buf = &addr; - t[0].len = 2; - spi_message_add_tail(&t[0], &m); - - t[1].rx_buf = buf; - t[1].len = len; - spi_message_add_tail(&t[1], &m); - - spi_sync(stlc->spi, &m); -} - - -static void stlc45xx_spi_write(struct stlc45xx *stlc, unsigned long addr, - void *buf, size_t len) -{ - struct spi_transfer t[3]; - struct spi_message m; - u16 last_word; - - /* We first push the address */ - addr = addr << 8; - - spi_message_init(&m); - memset(t, 0, sizeof(t)); - - t[0].tx_buf = &addr; - t[0].len = 2; - spi_message_add_tail(&t[0], &m); - - t[1].tx_buf = buf; - t[1].len = len; - spi_message_add_tail(&t[1], &m); - - if (len % 2) { - last_word = ((u8 *)buf)[len - 1]; - - t[2].tx_buf = &last_word; - t[2].len = 2; - spi_message_add_tail(&t[2], &m); - } - - spi_sync(stlc->spi, &m); -} - -static u16 stlc45xx_read16(struct stlc45xx *stlc, unsigned long addr) -{ - u16 val; - - stlc45xx_spi_read(stlc, addr, &val, sizeof(val)); - - return val; -} - -static u32 stlc45xx_read32(struct stlc45xx *stlc, unsigned long addr) -{ - u32 val; - - stlc45xx_spi_read(stlc, addr, &val, sizeof(val)); - - return val; -} - -static void stlc45xx_write16(struct stlc45xx *stlc, unsigned long addr, u16 val) -{ - stlc45xx_spi_write(stlc, addr, &val, sizeof(val)); -} - -static void stlc45xx_write32(struct stlc45xx *stlc, unsigned long addr, u32 val) -{ - stlc45xx_spi_write(stlc, addr, &val, sizeof(val)); -} - -struct 
stlc45xx_spi_reg { - u16 address; - u16 length; - char *name; -}; - -/* caller must hold tx_lock */ -static void stlc45xx_txbuffer_dump(struct stlc45xx *stlc) -{ - struct txbuffer *txbuffer; - char *buf, *pos; - int buf_len, l, count; - - if (!(DEBUG_LEVEL & DEBUG_TXBUFFER)) - return; - - stlc45xx_debug(DEBUG_FUNC, "%s()", __func__); - - buf_len = 500; - buf = kmalloc(buf_len, GFP_ATOMIC); - if (!buf) - return; - - pos = buf; - count = 0; - - list_for_each_entry(txbuffer, &stlc->txbuffer, buffer_list) { - l = snprintf(pos, buf_len, "0x%x-0x%x,", - txbuffer->start, txbuffer->end); - /* drop the null byte */ - pos += l; - buf_len -= l; - count++; - } - - if (count == 0) - *pos = '\0'; - else - *--pos = '\0'; - - stlc45xx_debug(DEBUG_TXBUFFER, "txbuffer: in buffer %d regions: %s", - count, buf); - - kfree(buf); -} - -/* caller must hold tx_lock */ -static int stlc45xx_txbuffer_find(struct stlc45xx *stlc, size_t len) -{ - struct txbuffer *txbuffer; - int pos; - - stlc45xx_debug(DEBUG_FUNC, "%s()", __func__); - - pos = FIRMWARE_TXBUFFER_START; - - if (list_empty(&stlc->txbuffer)) - goto out; - - /* - * the entries in txbuffer must be in the same order as they are in - * the real buffer - */ - list_for_each_entry(txbuffer, &stlc->txbuffer, buffer_list) { - if (pos + len < txbuffer->start) - break; - pos = ALIGN(txbuffer->end + 1, 4); - } - - if (pos + len > FIRMWARE_TXBUFFER_END) - /* not enough room */ - pos = -1; - - stlc45xx_debug(DEBUG_TXBUFFER, "txbuffer: find %zu B: 0x%x", len, pos); - -out: - return pos; -} - -static int stlc45xx_txbuffer_add(struct stlc45xx *stlc, - struct txbuffer *txbuffer) -{ - struct txbuffer *r, *prev = NULL; - - if (list_empty(&stlc->txbuffer)) { - list_add(&txbuffer->buffer_list, &stlc->txbuffer); - return 0; - } - - r = list_first_entry(&stlc->txbuffer, struct txbuffer, buffer_list); - - if (txbuffer->start < r->start) { - /* add to the beginning of the list */ - list_add(&txbuffer->buffer_list, &stlc->txbuffer); - return 0; - } - - prev = NULL; - list_for_each_entry(r, &stlc->txbuffer, buffer_list) { - /* skip first entry, we checked for that above */ - if (!prev) { - prev = r; - continue; - } - - /* double-check overlaps */ - WARN_ON_ONCE(txbuffer->start >= r->start && - txbuffer->start <= r->end); - WARN_ON_ONCE(txbuffer->end >= r->start && - txbuffer->end <= r->end); - - if (prev->end < txbuffer->start && - txbuffer->end < r->start) { - /* insert at this spot */ - list_add_tail(&txbuffer->buffer_list, &r->buffer_list); - return 0; - } - - prev = r; - } - - /* not found */ - list_add_tail(&txbuffer->buffer_list, &stlc->txbuffer); - - return 0; - -} - -/* caller must hold tx_lock */ -static struct txbuffer *stlc45xx_txbuffer_alloc(struct stlc45xx *stlc, - size_t frame_len) -{ - struct txbuffer *entry = NULL; - size_t len; - int pos; - - stlc45xx_debug(DEBUG_FUNC, "%s()", __func__); - - len = FIRMWARE_TXBUFFER_HEADER + frame_len + FIRMWARE_TXBUFFER_TRAILER; - pos = stlc45xx_txbuffer_find(stlc, len); - - if (pos < 0) - return NULL; - - WARN_ON_ONCE(pos + len > FIRMWARE_TXBUFFER_END); - WARN_ON_ONCE(pos < FIRMWARE_TXBUFFER_START); - - entry = kmalloc(sizeof(*entry), GFP_ATOMIC); - entry->start = pos; - entry->frame_start = pos + FIRMWARE_TXBUFFER_HEADER; - entry->end = entry->start + len - 1; - - stlc45xx_debug(DEBUG_TXBUFFER, "txbuffer: allocated 0x%x-0x%x", - entry->start, entry->end); - - stlc45xx_txbuffer_add(stlc, entry); - - stlc45xx_txbuffer_dump(stlc); - - return entry; -} - -/* caller must hold tx_lock */ -static void stlc45xx_txbuffer_free(struct 
stlc45xx *stlc, - struct txbuffer *txbuffer) -{ - stlc45xx_debug(DEBUG_FUNC, "%s()", __func__); - - stlc45xx_debug(DEBUG_TXBUFFER, "txbuffer: freed 0x%x-0x%x", - txbuffer->start, txbuffer->end); - - list_del(&txbuffer->buffer_list); - kfree(txbuffer); -} - - -static int stlc45xx_wait_bit(struct stlc45xx *stlc, u16 reg, u32 mask, - u32 expected) -{ - int i; - char buffer[4]; - - for (i = 0; i < 2000; i++) { - stlc45xx_spi_read(stlc, reg, buffer, sizeof(buffer)); - if (((*(u32 *)buffer) & mask) == expected) - return 1; - msleep(1); - } - - return 0; -} - -static int stlc45xx_request_firmware(struct stlc45xx *stlc) -{ - const struct firmware *fw; - int ret; - - /* FIXME: should driver use it's own struct device? */ - ret = request_firmware(&fw, "3826.arm", &stlc->spi->dev); - - if (ret < 0) { - stlc45xx_error("request_firmware() failed: %d", ret); - return ret; - } - - if (fw->size % 4) { - stlc45xx_error("firmware size is not multiple of 32bit: %zu", - fw->size); - return -EILSEQ; /* Illegal byte sequence */; - } - - if (fw->size < 1000) { - stlc45xx_error("firmware is too small: %zu", fw->size); - return -EILSEQ; - } - - stlc->fw = kmemdup(fw->data, fw->size, GFP_KERNEL); - if (!stlc->fw) { - stlc45xx_error("could not allocate memory for firmware"); - return -ENOMEM; - } - - stlc->fw_len = fw->size; - - release_firmware(fw); - - return 0; -} - -static int stlc45xx_upload_firmware(struct stlc45xx *stlc) -{ - struct s_dma_regs dma_regs; - unsigned long fw_len, fw_addr; - long _fw_len; - int ret; - - stlc45xx_debug(DEBUG_FUNC, "%s", __func__); - - if (!stlc->fw) { - ret = stlc45xx_request_firmware(stlc); - if (ret < 0) - return ret; - } - - /* stop the device */ - stlc45xx_write16(stlc, SPI_ADRS_DEV_CTRL_STAT, - SPI_CTRL_STAT_HOST_OVERRIDE | SPI_CTRL_STAT_HOST_RESET - | SPI_CTRL_STAT_START_HALTED); - - msleep(TARGET_BOOT_SLEEP); - - stlc45xx_write16(stlc, SPI_ADRS_DEV_CTRL_STAT, - SPI_CTRL_STAT_HOST_OVERRIDE - | SPI_CTRL_STAT_START_HALTED); - - msleep(TARGET_BOOT_SLEEP); - - fw_addr = FIRMWARE_ADDRESS; - fw_len = stlc->fw_len; - - while (fw_len > 0) { - _fw_len = (fw_len > SPI_MAX_PACKET_SIZE) - ? SPI_MAX_PACKET_SIZE : fw_len; - dma_regs.cmd = SPI_DMA_WRITE_CTRL_ENABLE; - dma_regs.len = cpu_to_le16(_fw_len); - dma_regs.addr = cpu_to_le32(fw_addr); - - fw_len -= _fw_len; - fw_addr += _fw_len; - - stlc45xx_write16(stlc, SPI_ADRS_DMA_WRITE_CTRL, dma_regs.cmd); - - if (stlc45xx_wait_bit(stlc, SPI_ADRS_DMA_WRITE_CTRL, - HOST_ALLOWED, HOST_ALLOWED) == 0) { - stlc45xx_error("fw_upload not allowed to DMA write"); - return -EAGAIN; - } - - stlc45xx_write16(stlc, SPI_ADRS_DMA_WRITE_LEN, dma_regs.len); - stlc45xx_write32(stlc, SPI_ADRS_DMA_WRITE_BASE, dma_regs.addr); - - stlc45xx_spi_write(stlc, SPI_ADRS_DMA_DATA, stlc->fw, _fw_len); - - /* FIXME: I think this doesn't work if firmware is large, - * this loop goes to second round. fw->data is not - * increased at all! 
*/ - } - - BUG_ON(fw_len != 0); - - /* enable host interrupts */ - stlc45xx_write32(stlc, SPI_ADRS_HOST_INT_EN, SPI_HOST_INTS_DEFAULT); - - /* boot the device */ - stlc45xx_write16(stlc, SPI_ADRS_DEV_CTRL_STAT, - SPI_CTRL_STAT_HOST_OVERRIDE | SPI_CTRL_STAT_HOST_RESET - | SPI_CTRL_STAT_RAM_BOOT); - - msleep(TARGET_BOOT_SLEEP); - - stlc45xx_write16(stlc, SPI_ADRS_DEV_CTRL_STAT, - SPI_CTRL_STAT_HOST_OVERRIDE | SPI_CTRL_STAT_RAM_BOOT); - msleep(TARGET_BOOT_SLEEP); - - return 0; -} - -/* caller must hold tx_lock */ -static void stlc45xx_check_txsent(struct stlc45xx *stlc) -{ - struct txbuffer *entry, *n; - - list_for_each_entry_safe(entry, n, &stlc->tx_sent, tx_list) { - if (time_after(jiffies, entry->lifetime)) { - if (net_ratelimit()) - stlc45xx_warning("frame 0x%x lifetime exceeded", - entry->start); - list_del(&entry->tx_list); - skb_pull(entry->skb, entry->header_len); - ieee80211_tx_status(stlc->hw, entry->skb); - stlc45xx_txbuffer_free(stlc, entry); - } - } -} - -static void stlc45xx_power_off(struct stlc45xx *stlc) -{ - disable_irq(gpio_to_irq(stlc45xx_gpio_irq)); - gpio_set_value(stlc45xx_gpio_power, 0); -} - -static void stlc45xx_power_on(struct stlc45xx *stlc) -{ - gpio_set_value(stlc45xx_gpio_power, 1); - enable_irq(gpio_to_irq(stlc45xx_gpio_irq)); - - /* - * need to wait a while before device can be accessed, the length - * is just a guess - */ - msleep(10); -} - -/* caller must hold tx_lock */ -static void stlc45xx_flush_queues(struct stlc45xx *stlc) -{ - struct txbuffer *entry; - - while (!list_empty(&stlc->tx_sent)) { - entry = list_first_entry(&stlc->tx_sent, - struct txbuffer, tx_list); - list_del(&entry->tx_list); - dev_kfree_skb(entry->skb); - stlc45xx_txbuffer_free(stlc, entry); - } - - WARN_ON(!list_empty(&stlc->tx_sent)); - - while (!list_empty(&stlc->tx_pending)) { - entry = list_first_entry(&stlc->tx_pending, - struct txbuffer, tx_list); - list_del(&entry->tx_list); - dev_kfree_skb(entry->skb); - stlc45xx_txbuffer_free(stlc, entry); - } - - WARN_ON(!list_empty(&stlc->tx_pending)); - WARN_ON(!list_empty(&stlc->txbuffer)); -} - -static void stlc45xx_work_reset(struct work_struct *work) -{ - struct stlc45xx *stlc = container_of(work, struct stlc45xx, - work_reset); - - mutex_lock(&stlc->mutex); - - if (stlc->fw_state != FW_STATE_RESET) - goto out; - - stlc45xx_power_off(stlc); - - mutex_unlock(&stlc->mutex); - - /* wait that all work_structs have finished, we can't hold - * stlc->mutex to avoid deadlock */ - cancel_work_sync(&stlc->work); - - /* FIXME: find out good value to wait for chip power down */ - msleep(100); - - mutex_lock(&stlc->mutex); - - /* FIXME: we should gracefully handle if the state has changed - * after re-acquiring mutex */ - WARN_ON(stlc->fw_state != FW_STATE_RESET); - - spin_lock_bh(&stlc->tx_lock); - stlc45xx_flush_queues(stlc); - spin_unlock_bh(&stlc->tx_lock); - - stlc->fw_state = FW_STATE_RESETTING; - - stlc45xx_power_on(stlc); - stlc45xx_upload_firmware(stlc); - -out: - mutex_unlock(&stlc->mutex); -} - -/* caller must hold mutex */ -static void stlc45xx_reset(struct stlc45xx *stlc) -{ - stlc45xx_warning("resetting firmware"); - stlc->fw_state = FW_STATE_RESET; - ieee80211_stop_queues(stlc->hw); - queue_work(stlc->hw->workqueue, &stlc->work_reset); -} - -static void stlc45xx_work_tx_timeout(struct work_struct *work) -{ - struct stlc45xx *stlc = container_of(work, struct stlc45xx, - work_tx_timeout.work); - - stlc45xx_warning("tx timeout"); - - mutex_lock(&stlc->mutex); - - if (stlc->fw_state != FW_STATE_READY) - goto out; - - stlc45xx_reset(stlc); 
- -out: - mutex_unlock(&stlc->mutex); -} - -static void stlc45xx_int_ack(struct stlc45xx *stlc, u32 val) -{ - stlc45xx_debug(DEBUG_FUNC, "%s", __func__); - - stlc45xx_write32(stlc, SPI_ADRS_HOST_INT_ACK, val); -} - -static void stlc45xx_wakeup(struct stlc45xx *stlc) -{ - unsigned long timeout; - u32 ints; - - stlc45xx_debug(DEBUG_FUNC, "%s", __func__); - - /* wake the chip */ - stlc45xx_write32(stlc, SPI_ADRS_ARM_INTERRUPTS, SPI_TARGET_INT_WAKEUP); - - /* And wait for the READY interrupt */ - timeout = jiffies + HZ; - - ints = stlc45xx_read32(stlc, SPI_ADRS_HOST_INTERRUPTS); - while (!(ints & SPI_HOST_INT_READY)) { - if (time_after(jiffies, timeout)) - goto out; - ints = stlc45xx_read32(stlc, SPI_ADRS_HOST_INTERRUPTS); - } - - stlc45xx_int_ack(stlc, SPI_HOST_INT_READY); - -out: - return; -} - -static void stlc45xx_sleep(struct stlc45xx *stlc) -{ - stlc45xx_debug(DEBUG_FUNC, "%s", __func__); - - stlc45xx_write32(stlc, SPI_ADRS_ARM_INTERRUPTS, SPI_TARGET_INT_SLEEP); -} - -static void stlc45xx_int_ready(struct stlc45xx *stlc) -{ - stlc45xx_debug(DEBUG_FUNC, "%s", __func__); - - stlc45xx_write32(stlc, SPI_ADRS_HOST_INT_EN, - SPI_HOST_INT_UPDATE | SPI_HOST_INT_SW_UPDATE); - - switch (stlc->fw_state) { - case FW_STATE_BOOTING: - stlc->fw_state = FW_STATE_READY; - complete(&stlc->fw_comp); - break; - case FW_STATE_RESETTING: - stlc->fw_state = FW_STATE_READY; - - stlc45xx_tx_scan(stlc); - stlc45xx_tx_setup(stlc); - stlc45xx_tx_edcf(stlc); - - ieee80211_wake_queues(stlc->hw); - break; - default: - break; - } -} - -static int stlc45xx_rx_txack(struct stlc45xx *stlc, struct sk_buff *skb) -{ - struct ieee80211_tx_info *info; - struct s_lm_control *control; - struct s_lmo_tx *tx; - struct txbuffer *entry; - int found = 0; - - stlc45xx_debug(DEBUG_FUNC, "%s", __func__); - - control = (struct s_lm_control *) skb->data; - tx = (struct s_lmo_tx *) (control + 1); - - if (list_empty(&stlc->tx_sent)) { - if (net_ratelimit()) - stlc45xx_warning("no frames waiting for " - "acknowledgement"); - return -1; - } - - list_for_each_entry(entry, &stlc->tx_sent, tx_list) { - if (control->handle == entry->handle) { - found = 1; - break; - } - } - - if (!found) { - if (net_ratelimit()) - stlc45xx_warning("couldn't find frame for tx ack 0x%x", - control->handle); - return -1; - } - - stlc45xx_debug(DEBUG_TX, "TX ACK 0x%x", entry->handle); - - if (entry->status_needed) { - info = IEEE80211_SKB_CB(entry->skb); - - if (!(tx->flags & LM_TX_FAILED)) { - /* frame was acked */ - info->flags |= IEEE80211_TX_STAT_ACK; - info->status.ack_signal = tx->rcpi / 2 - 110; - } - - skb_pull(entry->skb, entry->header_len); - - ieee80211_tx_status(stlc->hw, entry->skb); - } - - list_del(&entry->tx_list); - - stlc45xx_check_txsent(stlc); - if (list_empty(&stlc->tx_sent)) - /* there are no pending frames, we can stop the tx timeout - * timer */ - cancel_delayed_work(&stlc->work_tx_timeout); - - spin_lock_bh(&stlc->tx_lock); - - stlc45xx_txbuffer_free(stlc, entry); - - if (stlc->tx_queue_stopped && - stlc45xx_txbuffer_find(stlc, MAX_FRAME_LEN) != -1) { - stlc45xx_debug(DEBUG_QUEUE, "room in tx buffer, waking queues"); - ieee80211_wake_queues(stlc->hw); - stlc->tx_queue_stopped = 0; - } - - spin_unlock_bh(&stlc->tx_lock); - - return 0; -} - -static int stlc45xx_rx_control(struct stlc45xx *stlc, struct sk_buff *skb) -{ - struct s_lm_control *control; - int ret = 0; - - stlc45xx_debug(DEBUG_FUNC, "%s", __func__); - - control = (struct s_lm_control *) skb->data; - - switch (control->oid) { - case LM_OID_TX: - ret = stlc45xx_rx_txack(stlc, skb); - 
break; - case LM_OID_SETUP: - case LM_OID_SCAN: - case LM_OID_TRAP: - case LM_OID_EDCF: - case LM_OID_KEYCACHE: - case LM_OID_PSM: - case LM_OID_STATS: - case LM_OID_LED: - default: - stlc45xx_warning("unhandled rx control oid %d\n", - control->oid); - break; - } - - dev_kfree_skb(skb); - - return ret; -} - -/* copied from mac80211 */ -static void stlc45xx_parse_elems(u8 *start, size_t len, - struct stlc45xx_ie_tim **tim, - size_t *tim_len) -{ - size_t left = len; - u8 *pos = start; - - while (left >= 2) { - u8 id, elen; - - id = *pos++; - elen = *pos++; - left -= 2; - - if (elen > left) - return; - - switch (id) { - case WLAN_EID_TIM: - *tim = (struct stlc45xx_ie_tim *) pos; - *tim_len = elen; - break; - default: - break; - } - - left -= elen; - pos += elen; - } -} - -/* - * mac80211 doesn't have support for asking frames with PS-Poll, so let's - * implement in the driver for now. We have to add support to mac80211 - * later. - */ -static int stlc45xx_check_more_data(struct stlc45xx *stlc, struct sk_buff *skb) -{ - struct s_lm_data_in *data = (struct s_lm_data_in *) skb->data; - struct ieee80211_hdr *hdr; - size_t len; - u16 fc; - - hdr = (void *) skb->data + sizeof(*data); - len = skb->len - sizeof(*data); - - /* minimum frame length is the null frame length 24 bytes */ - if (len < 24) { - stlc45xx_warning("invalid frame length when checking for " - "more data"); - return -EINVAL; - } - - fc = le16_to_cpu(hdr->frame_control); - if (!(fc & IEEE80211_FCTL_FROMDS)) - /* this is not from DS */ - return 0; - - if (compare_ether_addr(hdr->addr1, stlc->mac_addr) != 0) - /* the frame was not for us */ - return 0; - - if (!(fc & IEEE80211_FCTL_MOREDATA)) { - /* AP has no more frames buffered for us */ - stlc45xx_debug(DEBUG_PSM, "all buffered frames retrieved"); - stlc->pspolling = false; - return 0; - } - - /* MOREDATA bit is set, let's ask for a new frame from the AP */ - stlc45xx_tx_pspoll(stlc, stlc->psm); - - return 0; -} - -/* - * mac80211 cannot read TIM from beacons, so let's add a hack to the - * driver. We have to add support to mac80211 later. 
- */ -static int stlc45xx_rx_data_beacon(struct stlc45xx *stlc, struct sk_buff *skb) -{ - struct s_lm_data_in *data = (struct s_lm_data_in *) skb->data; - size_t len = skb->len, tim_len = 0, baselen, pvbmap_len; - struct ieee80211_mgmt *mgmt; - struct stlc45xx_ie_tim *tim = NULL; - int bmap_offset, index, aid_bit; - - mgmt = (void *) skb->data + sizeof(*data); - - baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt; - if (baselen > len) { - stlc45xx_warning("invalid baselen in beacon"); - return -EINVAL; - } - - stlc45xx_parse_elems(mgmt->u.beacon.variable, len - baselen, &tim, - &tim_len); - - if (!tim) { - stlc45xx_warning("didn't find tim from a beacon"); - return -EINVAL; - } - - bmap_offset = tim->bmap_control & 0xfe; - index = stlc->aid / 8 - bmap_offset; - - pvbmap_len = tim_len - 3; - if (index > pvbmap_len) - return -EINVAL; - - aid_bit = !!(tim->pvbmap[index] & (1 << stlc->aid % 8)); - - stlc45xx_debug(DEBUG_PSM, "fc 0x%x duration %d seq %d dtim %u " - "bmap_control 0x%x aid_bit %d", - mgmt->frame_control, mgmt->duration, mgmt->seq_ctrl >> 4, - tim->dtim_count, tim->bmap_control, aid_bit); - - if (!aid_bit) - return 0; - - stlc->pspolling = true; - stlc45xx_tx_pspoll(stlc, stlc->psm); - - return 0; -} - -static int stlc45xx_rx_data(struct stlc45xx *stlc, struct sk_buff *skb) -{ - struct ieee80211_rx_status status; - struct s_lm_data_in *data = (struct s_lm_data_in *) skb->data; - int align = 0; - u8 *p, align_len; - u16 len; - - stlc45xx_debug(DEBUG_FUNC, "%s", __func__); - - if (stlc->psm) { - if (data->flags & LM_IN_BEACON) - stlc45xx_rx_data_beacon(stlc, skb); - else if (stlc->pspolling && (data->flags & LM_IN_DATA)) - stlc45xx_check_more_data(stlc, skb); - } - - memset(&status, 0, sizeof(status)); - - status.freq = data->frequency; - status.signal = data->rcpi / 2 - 110; - - /* let's assume that maximum rcpi value is 140 (= 35 dBm) */ - status.qual = data->rcpi * 100 / 140; - - status.band = IEEE80211_BAND_2GHZ; - - /* - * FIXME: this gives warning from __ieee80211_rx() - * - * status.rate_idx = data->rate; - */ - - len = data->length; - - if (data->flags & LM_FLAG_ALIGN) - align = 1; - - skb_pull(skb, sizeof(*data)); - - if (align) { - p = skb->data; - align_len = *p; - skb_pull(skb, align_len); - } - - skb_trim(skb, len); - - stlc45xx_debug(DEBUG_RX, "rx data 0x%p %d B", skb->data, skb->len); - stlc45xx_dump(DEBUG_RX_CONTENT, skb->data, skb->len); - - memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); - ieee80211_rx(stlc->hw, skb); - - return 0; -} - - - -static int stlc45xx_rx(struct stlc45xx *stlc) -{ - struct s_lm_control *control; - struct sk_buff *skb; - int ret; - u16 len; - - stlc45xx_debug(DEBUG_FUNC, "%s", __func__); - - stlc45xx_wakeup(stlc); - - /* dummy read to flush SPI DMA controller bug */ - stlc45xx_read16(stlc, SPI_ADRS_GEN_PURP_1); - - len = stlc45xx_read16(stlc, SPI_ADRS_DMA_DATA); - - if (len == 0) { - stlc45xx_warning("rx request of zero bytes"); - return 0; - } - - skb = dev_alloc_skb(len); - if (!skb) { - stlc45xx_warning("could not alloc skb"); - return 0; - } - - stlc45xx_spi_read(stlc, SPI_ADRS_DMA_DATA, skb_put(skb, len), len); - - stlc45xx_sleep(stlc); - - stlc45xx_debug(DEBUG_RX, "rx frame 0x%p %d B", skb->data, skb->len); - stlc45xx_dump(DEBUG_RX_CONTENT, skb->data, skb->len); - - control = (struct s_lm_control *) skb->data; - - if (control->flags & LM_FLAG_CONTROL) - ret = stlc45xx_rx_control(stlc, skb); - else - ret = stlc45xx_rx_data(stlc, skb); - - return ret; -} - - -static irqreturn_t stlc45xx_interrupt(int irq, void *config) -{ 
- struct spi_device *spi = config; - struct stlc45xx *stlc = dev_get_drvdata(&spi->dev); - - stlc45xx_debug(DEBUG_IRQ, "IRQ"); - - queue_work(stlc->hw->workqueue, &stlc->work); - - return IRQ_HANDLED; -} - -static int stlc45xx_tx_frame(struct stlc45xx *stlc, u32 address, - void *buf, size_t len) -{ - struct s_dma_regs dma_regs; - unsigned long timeout; - int ret = 0; - u32 ints; - - stlc->tx_frames++; - - stlc45xx_debug(DEBUG_FUNC, "%s", __func__); - - stlc45xx_debug(DEBUG_TX, "tx frame 0x%p %zu B", buf, len); - stlc45xx_dump(DEBUG_TX_CONTENT, buf, len); - - stlc45xx_wakeup(stlc); - - dma_regs.cmd = SPI_DMA_WRITE_CTRL_ENABLE; - dma_regs.len = cpu_to_le16(len); - dma_regs.addr = cpu_to_le32(address); - - stlc45xx_spi_write(stlc, SPI_ADRS_DMA_WRITE_CTRL, &dma_regs, - sizeof(dma_regs)); - - stlc45xx_spi_write(stlc, SPI_ADRS_DMA_DATA, buf, len); - - timeout = jiffies + 2 * HZ; - ints = stlc45xx_read32(stlc, SPI_ADRS_HOST_INTERRUPTS); - while (!(ints & SPI_HOST_INT_WR_READY)) { - if (time_after(jiffies, timeout)) { - stlc45xx_warning("WR_READY timeout"); - ret = -1; - goto out; - } - ints = stlc45xx_read32(stlc, SPI_ADRS_HOST_INTERRUPTS); - } - - stlc45xx_int_ack(stlc, SPI_HOST_INT_WR_READY); - - stlc45xx_sleep(stlc); - -out: - return ret; -} - -static int stlc45xx_wq_tx(struct stlc45xx *stlc) -{ - struct txbuffer *entry; - int ret = 0; - - spin_lock_bh(&stlc->tx_lock); - - while (!list_empty(&stlc->tx_pending)) { - entry = list_entry(stlc->tx_pending.next, - struct txbuffer, tx_list); - - list_del_init(&entry->tx_list); - - spin_unlock_bh(&stlc->tx_lock); - - ret = stlc45xx_tx_frame(stlc, entry->frame_start, - entry->skb->data, entry->skb->len); - - spin_lock_bh(&stlc->tx_lock); - - if (ret < 0) { - /* frame transfer to firmware buffer failed */ - /* FIXME: report this to mac80211 */ - dev_kfree_skb(entry->skb); - stlc45xx_txbuffer_free(stlc, entry); - goto out; - } - - list_add(&entry->tx_list, &stlc->tx_sent); - queue_delayed_work(stlc->hw->workqueue, - &stlc->work_tx_timeout, - msecs_to_jiffies(TX_TIMEOUT)); - } - -out: - spin_unlock_bh(&stlc->tx_lock); - return ret; -} - -static void stlc45xx_work(struct work_struct *work) -{ - struct stlc45xx *stlc = container_of(work, struct stlc45xx, work); - u32 ints; - int ret; - - stlc45xx_debug(DEBUG_FUNC, "%s", __func__); - - mutex_lock(&stlc->mutex); - - if (stlc->fw_state == FW_STATE_OFF && - stlc->fw_state == FW_STATE_RESET) - goto out; - - ints = stlc45xx_read32(stlc, SPI_ADRS_HOST_INTERRUPTS); - stlc45xx_debug(DEBUG_BH, "begin host_ints 0x%08x", ints); - - if (ints & SPI_HOST_INT_READY) { - stlc45xx_int_ready(stlc); - stlc45xx_int_ack(stlc, SPI_HOST_INT_READY); - } - - if (stlc->fw_state != FW_STATE_READY) - goto out; - - if (ints & SPI_HOST_INT_UPDATE) { - stlc45xx_int_ack(stlc, SPI_HOST_INT_UPDATE); - ret = stlc45xx_rx(stlc); - if (ret < 0) { - stlc45xx_reset(stlc); - goto out; - } - } - if (ints & SPI_HOST_INT_SW_UPDATE) { - stlc45xx_int_ack(stlc, SPI_HOST_INT_SW_UPDATE); - ret = stlc45xx_rx(stlc); - if (ret < 0) { - stlc45xx_reset(stlc); - goto out; - } - } - - ret = stlc45xx_wq_tx(stlc); - if (ret < 0) { - stlc45xx_reset(stlc); - goto out; - } - - ints = stlc45xx_read32(stlc, SPI_ADRS_HOST_INTERRUPTS); - stlc45xx_debug(DEBUG_BH, "end host_ints 0x%08x", ints); - -out: - mutex_unlock(&stlc->mutex); -} - -static void stlc45xx_tx_edcf(struct stlc45xx *stlc) -{ - struct s_lm_control *control; - struct s_lmo_edcf *edcf; - size_t len, edcf_len; - - stlc45xx_debug(DEBUG_FUNC, "%s", __func__); - - edcf_len = sizeof(*edcf); - len = sizeof(*control) 
+ edcf_len; - control = kzalloc(len, GFP_KERNEL); - edcf = (struct s_lmo_edcf *) (control + 1); - - control->flags = LM_FLAG_CONTROL | LM_CTRL_OPSET; - control->length = edcf_len; - control->oid = LM_OID_EDCF; - - edcf->slottime = 0x14; - edcf->sifs = 10; - edcf->eofpad = 6; - edcf->maxburst = 1500; - - edcf->queues[0].aifs = 2; - edcf->queues[0].pad0 = 1; - edcf->queues[0].cwmin = 3; - edcf->queues[0].cwmax = 7; - edcf->queues[0].txop = 47; - edcf->queues[1].aifs = 2; - edcf->queues[1].pad0 = 0; - edcf->queues[1].cwmin = 7; - edcf->queues[1].cwmax = 15; - edcf->queues[1].txop = 94; - edcf->queues[2].aifs = 3; - edcf->queues[2].pad0 = 0; - edcf->queues[2].cwmin = 15; - edcf->queues[2].cwmax = 1023; - edcf->queues[2].txop = 0; - edcf->queues[3].aifs = 7; - edcf->queues[3].pad0 = 0; - edcf->queues[3].cwmin = 15; - edcf->queues[3].cwmax = 1023; - edcf->queues[3].txop = 0; - edcf->queues[4].aifs = 13; - edcf->queues[4].pad0 = 99; - edcf->queues[4].cwmin = 3437; - edcf->queues[4].cwmax = 512; - edcf->queues[4].txop = 12; - edcf->queues[5].aifs = 142; - edcf->queues[5].pad0 = 109; - edcf->queues[5].cwmin = 8756; - edcf->queues[5].cwmax = 6; - edcf->queues[5].txop = 0; - edcf->queues[6].aifs = 4; - edcf->queues[6].pad0 = 0; - edcf->queues[6].cwmin = 0; - edcf->queues[6].cwmax = 58705; - edcf->queues[6].txop = 25716; - edcf->queues[7].aifs = 0; - edcf->queues[7].pad0 = 0; - edcf->queues[7].cwmin = 0; - edcf->queues[7].cwmax = 0; - edcf->queues[7].txop = 0; - - stlc45xx_tx_frame(stlc, FIRMWARE_CONFIG_START, control, len); - - kfree(control); -} - -static void stlc45xx_tx_setup(struct stlc45xx *stlc) -{ - struct s_lm_control *control; - struct s_lmo_setup *setup; - size_t len, setup_len; - - stlc45xx_debug(DEBUG_FUNC, "%s", __func__); - - setup_len = sizeof(*setup); - len = sizeof(*control) + setup_len; - control = kzalloc(len, GFP_KERNEL); - setup = (struct s_lmo_setup *) (control + 1); - - control->flags = LM_FLAG_CONTROL | LM_CTRL_OPSET; - control->length = setup_len; - control->oid = LM_OID_SETUP; - - setup->flags = LM_SETUP_INFRA; - setup->antenna = 2; - setup->rx_align = 0; - setup->rx_buffer = FIRMWARE_RXBUFFER_START; - setup->rx_mtu = FIRMWARE_MTU; - setup->frontend = 5; - setup->timeout = 0; - setup->truncate = 48896; - setup->bratemask = 0xffffffff; - setup->ref_clock = 644245094; - setup->lpf_bandwidth = 65535; - setup->osc_start_delay = 65535; - - memcpy(setup->macaddr, stlc->mac_addr, ETH_ALEN); - memcpy(setup->bssid, stlc->bssid, ETH_ALEN); - - stlc45xx_tx_frame(stlc, FIRMWARE_CONFIG_START, control, len); - - kfree(control); -} - -static void stlc45xx_tx_scan(struct stlc45xx *stlc) -{ - struct s_lm_control *control; - struct s_lmo_scan *scan; - size_t len, scan_len; - - stlc45xx_debug(DEBUG_FUNC, "%s", __func__); - - scan_len = sizeof(*scan); - len = sizeof(*control) + scan_len; - control = kzalloc(len, GFP_KERNEL); - scan = (struct s_lmo_scan *) (control + 1); - - control->flags = LM_FLAG_CONTROL | LM_CTRL_OPSET; - control->length = scan_len; - control->oid = LM_OID_SCAN; - - scan->flags = LM_SCAN_EXIT; - scan->bratemask = 0x15f; - scan->aloft[0] = 3; - scan->aloft[1] = 3; - scan->aloft[2] = 1; - scan->aloft[3] = 0; - scan->aloft[4] = 0; - scan->aloft[5] = 0; - scan->aloft[6] = 0; - scan->aloft[7] = 0; - - memcpy(&scan->rssical, &stlc->cal_rssi[(stlc->channel - 1) * - RSSI_CAL_LEN], - RSSI_CAL_LEN); - memcpy(&scan->channel, &stlc->cal_channels[(stlc->channel - 1) * - CHANNEL_CAL_LEN], - CHANNEL_CAL_LEN); - - stlc45xx_tx_frame(stlc, FIRMWARE_CONFIG_START, control, len); - - 
kfree(control); -} - -/* - * caller must hold mutex - */ -static int stlc45xx_tx_pspoll(struct stlc45xx *stlc, bool powersave) -{ - struct ieee80211_hdr *pspoll; - int payload_len, padding, i; - struct s_lm_data_out *data; - struct txbuffer *entry; - struct sk_buff *skb; - char *payload; - u16 fc; - - skb = dev_alloc_skb(stlc->hw->extra_tx_headroom + 16); - if (!skb) { - stlc45xx_warning("failed to allocate pspoll frame"); - return -ENOMEM; - } - skb_reserve(skb, stlc->hw->extra_tx_headroom); - - pspoll = (struct ieee80211_hdr *) skb_put(skb, 16); - memset(pspoll, 0, 16); - fc = IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL; - if (powersave) - fc |= IEEE80211_FCTL_PM; - pspoll->frame_control = cpu_to_le16(fc); - pspoll->duration_id = cpu_to_le16(stlc->aid); - - /* aid in PS-Poll has its two MSBs each set to 1 */ - pspoll->duration_id |= cpu_to_le16(1 << 15) | cpu_to_le16(1 << 14); - - memcpy(pspoll->addr1, stlc->bssid, ETH_ALEN); - memcpy(pspoll->addr2, stlc->mac_addr, ETH_ALEN); - - stlc45xx_debug(DEBUG_PSM, "sending PS-Poll frame to %pM (powersave %d, " - "fc 0x%x, aid %d)", pspoll->addr1, - powersave, fc, stlc->aid); - - spin_lock_bh(&stlc->tx_lock); - - entry = stlc45xx_txbuffer_alloc(stlc, skb->len); - - spin_unlock_bh(&stlc->tx_lock); - - if (!entry) { - /* - * The queue should be stopped before the firmware buffer - * is full, so firmware buffer should always have enough - * space. - * - * But I'm too lazy and omit it for now. - */ - if (net_ratelimit()) - stlc45xx_warning("firmware tx buffer full is full " - "for null frame"); - return -ENOSPC; - } - - payload = skb->data; - payload_len = skb->len; - padding = (int) (skb->data - sizeof(*data)) & 3; - entry->header_len = sizeof(*data) + padding; - - entry->skb = skb; - entry->status_needed = false; - entry->handle = (u32) skb; - entry->lifetime = jiffies + msecs_to_jiffies(TX_FRAME_LIFETIME); - - stlc45xx_debug(DEBUG_TX, "tx data 0x%x (0x%p payload %d B " - "padding %d header_len %d)", - entry->handle, payload, payload_len, padding, - entry->header_len); - stlc45xx_dump(DEBUG_TX_CONTENT, payload, payload_len); - - data = (struct s_lm_data_out *) skb_push(skb, entry->header_len); - - memset(data, 0, entry->header_len); - - if (padding) - data->flags = LM_FLAG_ALIGN; - - data->flags = LM_OUT_BURST; - data->length = payload_len; - data->handle = entry->handle; - data->aid = 1; - data->rts_retries = 7; - data->retries = 7; - data->aloft_ctrl = 0; - data->crypt_offset = 58; - data->keytype = 0; - data->keylen = 0; - data->queue = LM_QUEUE_DATA3; - data->backlog = 32; - data->antenna = 2; - data->cts = 3; - data->power = 127; - - for (i = 0; i < 8; i++) - data->aloft[i] = 0; - - /* - * check if there's enough space in tx buffer - * - * FIXME: ignored for now - */ - - stlc45xx_tx_frame(stlc, entry->start, skb->data, skb->len); - - list_add(&entry->tx_list, &stlc->tx_sent); - - return 0; -} - -/* - * caller must hold mutex - * - * shamelessly stolen from mac80211/ieee80211_send_nullfunc - */ -static int stlc45xx_tx_nullfunc(struct stlc45xx *stlc, bool powersave) -{ - struct ieee80211_hdr *nullfunc; - int payload_len, padding, i; - struct s_lm_data_out *data; - struct txbuffer *entry; - struct sk_buff *skb; - char *payload; - u16 fc; - - skb = dev_alloc_skb(stlc->hw->extra_tx_headroom + 24); - if (!skb) { - stlc45xx_warning("failed to allocate buffer for null frame\n"); - return -ENOMEM; - } - skb_reserve(skb, stlc->hw->extra_tx_headroom); - - nullfunc = (struct ieee80211_hdr *) skb_put(skb, 24); - memset(nullfunc, 0, 24); - fc = 
IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC | - IEEE80211_FCTL_TODS; - - if (powersave) - fc |= IEEE80211_FCTL_PM; - - nullfunc->frame_control = cpu_to_le16(fc); - memcpy(nullfunc->addr1, stlc->bssid, ETH_ALEN); - memcpy(nullfunc->addr2, stlc->mac_addr, ETH_ALEN); - memcpy(nullfunc->addr3, stlc->bssid, ETH_ALEN); - - stlc45xx_debug(DEBUG_PSM, "sending Null frame to %pM (powersave %d, " - "fc 0x%x)", nullfunc->addr1, powersave, fc); - - spin_lock_bh(&stlc->tx_lock); - - entry = stlc45xx_txbuffer_alloc(stlc, skb->len); - - spin_unlock_bh(&stlc->tx_lock); - - if (!entry) { - /* - * The queue should be stopped before the firmware buffer - * is full, so firmware buffer should always have enough - * space. - * - * But I'm too lazy and omit it for now. - */ - if (net_ratelimit()) - stlc45xx_warning("firmware tx buffer full is full " - "for null frame"); - return -ENOSPC; - } - - payload = skb->data; - payload_len = skb->len; - padding = (int) (skb->data - sizeof(*data)) & 3; - entry->header_len = sizeof(*data) + padding; - - entry->skb = skb; - entry->status_needed = false; - entry->handle = (u32) skb; - entry->lifetime = jiffies + msecs_to_jiffies(TX_FRAME_LIFETIME); - - stlc45xx_debug(DEBUG_TX, "tx data 0x%x (0x%p payload %d B " - "padding %d header_len %d)", - entry->handle, payload, payload_len, padding, - entry->header_len); - stlc45xx_dump(DEBUG_TX_CONTENT, payload, payload_len); - - data = (struct s_lm_data_out *) skb_push(skb, entry->header_len); - - memset(data, 0, entry->header_len); - - if (padding) - data->flags = LM_FLAG_ALIGN; - - data->flags = LM_OUT_BURST; - data->length = payload_len; - data->handle = entry->handle; - data->aid = 1; - data->rts_retries = 7; - data->retries = 7; - data->aloft_ctrl = 0; - data->crypt_offset = 58; - data->keytype = 0; - data->keylen = 0; - data->queue = LM_QUEUE_DATA3; - data->backlog = 32; - data->antenna = 2; - data->cts = 3; - data->power = 127; - - for (i = 0; i < 8; i++) - data->aloft[i] = 0; - - /* - * check if there's enough space in tx buffer - * - * FIXME: ignored for now - */ - - stlc45xx_tx_frame(stlc, entry->start, skb->data, skb->len); - - list_add(&entry->tx_list, &stlc->tx_sent); - - return 0; -} - -/* caller must hold mutex */ -static void stlc45xx_tx_psm(struct stlc45xx *stlc, bool enable) -{ - struct s_lm_control *control; - struct s_lmo_psm *psm; - size_t len, psm_len; - - WARN_ON(!stlc->associated); - WARN_ON(stlc->aid < 1); - WARN_ON(stlc->aid > 2007); - - psm_len = sizeof(*psm); - len = sizeof(*control) + psm_len; - control = kzalloc(len, GFP_KERNEL); - psm = (struct s_lmo_psm *) (control + 1); - - control->flags = LM_FLAG_CONTROL | LM_CTRL_OPSET; - control->length = psm_len; - control->oid = LM_OID_PSM; - - if (enable) - psm->flags |= LM_PSM; - - psm->aid = stlc->aid; - - psm->beacon_rcpi_skip_max = 60; - - psm->intervals[0].interval = 1; - psm->intervals[0].periods = 1; - psm->intervals[1].interval = 1; - psm->intervals[1].periods = 1; - psm->intervals[2].interval = 1; - psm->intervals[2].periods = 1; - psm->intervals[3].interval = 1; - psm->intervals[3].periods = 1; - - psm->nr = 0; - psm->exclude[0] = 0; - - stlc45xx_debug(DEBUG_PSM, "sending LM_OID_PSM (aid %d, interval %d)", - psm->aid, psm->intervals[0].interval); - - stlc45xx_tx_frame(stlc, FIRMWARE_CONFIG_START, control, len); - - kfree(control); -} - -static int stlc45xx_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) -{ - struct stlc45xx *stlc = hw->priv; - struct ieee80211_tx_info *info; - struct ieee80211_rate *rate; - int payload_len, padding, i; - struct 
s_lm_data_out *data; - struct txbuffer *entry; - char *payload; - - stlc45xx_debug(DEBUG_FUNC, "%s", __func__); - - spin_lock_bh(&stlc->tx_lock); - - entry = stlc45xx_txbuffer_alloc(stlc, skb->len); - if (!entry) { - /* the queue should be stopped before the firmware buffer - * is full, so firmware buffer should always have enough - * space */ - if (net_ratelimit()) - stlc45xx_warning("firmware buffer full"); - spin_unlock_bh(&stlc->tx_lock); - return NETDEV_TX_BUSY; - } - - info = IEEE80211_SKB_CB(skb); - - payload = skb->data; - payload_len = skb->len; - padding = (int) (skb->data - sizeof(*data)) & 3; - entry->header_len = sizeof(*data) + padding; - - entry->skb = skb; - entry->status_needed = true; - entry->handle = (u32) skb; - entry->lifetime = jiffies + msecs_to_jiffies(TX_FRAME_LIFETIME); - - stlc45xx_debug(DEBUG_TX, "tx data 0x%x (0x%p payload %d B " - "padding %d header_len %d)", - entry->handle, payload, payload_len, padding, - entry->header_len); - stlc45xx_dump(DEBUG_TX_CONTENT, payload, payload_len); - - data = (struct s_lm_data_out *) skb_push(skb, entry->header_len); - - memset(data, 0, entry->header_len); - - if (padding) - data->flags = LM_FLAG_ALIGN; - - data->flags = LM_OUT_BURST; - data->length = payload_len; - data->handle = entry->handle; - data->aid = 1; - data->rts_retries = 7; - data->retries = 7; - data->aloft_ctrl = 0; - data->crypt_offset = 58; - data->keytype = 0; - data->keylen = 0; - data->queue = 2; - data->backlog = 32; - data->antenna = 2; - data->cts = 3; - data->power = 127; - - for (i = 0; i < 8; i++) { - rate = ieee80211_get_tx_rate(stlc->hw, info); - data->aloft[i] = rate->hw_value; - } - - list_add_tail(&entry->tx_list, &stlc->tx_pending); - - /* check if there's enough space in tx buffer */ - if (stlc45xx_txbuffer_find(stlc, MAX_FRAME_LEN) == -1) { - stlc45xx_debug(DEBUG_QUEUE, "tx buffer full, stopping queues"); - stlc->tx_queue_stopped = 1; - ieee80211_stop_queues(stlc->hw); - } - - queue_work(stlc->hw->workqueue, &stlc->work); - - spin_unlock_bh(&stlc->tx_lock); - - return NETDEV_TX_OK; -} - -static int stlc45xx_op_start(struct ieee80211_hw *hw) -{ - struct stlc45xx *stlc = hw->priv; - unsigned long timeout; - int ret = 0; - - stlc45xx_debug(DEBUG_FUNC, "%s", __func__); - - mutex_lock(&stlc->mutex); - - stlc->fw_state = FW_STATE_BOOTING; - stlc->channel = 1; - - stlc45xx_power_on(stlc); - - ret = stlc45xx_upload_firmware(stlc); - if (ret < 0) { - stlc45xx_power_off(stlc); - goto out_unlock; - } - - stlc->tx_queue_stopped = 0; - - mutex_unlock(&stlc->mutex); - - timeout = msecs_to_jiffies(2000); - timeout = wait_for_completion_interruptible_timeout(&stlc->fw_comp, - timeout); - if (!timeout) { - stlc45xx_error("firmware boot failed"); - stlc45xx_power_off(stlc); - ret = -1; - goto out; - } - - stlc45xx_debug(DEBUG_BOOT, "firmware booted"); - - /* FIXME: should we take mutex just after wait_for_completion()? 
*/ - mutex_lock(&stlc->mutex); - - WARN_ON(stlc->fw_state != FW_STATE_READY); - -out_unlock: - mutex_unlock(&stlc->mutex); - -out: - return ret; -} - -static void stlc45xx_op_stop(struct ieee80211_hw *hw) -{ - struct stlc45xx *stlc = hw->priv; - - stlc45xx_debug(DEBUG_FUNC, "%s", __func__); - - mutex_lock(&stlc->mutex); - - WARN_ON(stlc->fw_state != FW_STATE_READY); - - stlc45xx_power_off(stlc); - - /* FIXME: make sure that all work_structs have completed */ - - spin_lock_bh(&stlc->tx_lock); - stlc45xx_flush_queues(stlc); - spin_unlock_bh(&stlc->tx_lock); - - stlc->fw_state = FW_STATE_OFF; - - mutex_unlock(&stlc->mutex); -} - -static int stlc45xx_op_add_interface(struct ieee80211_hw *hw, - struct ieee80211_if_init_conf *conf) -{ - struct stlc45xx *stlc = hw->priv; - - stlc45xx_debug(DEBUG_FUNC, "%s", __func__); - - switch (conf->type) { - case NL80211_IFTYPE_STATION: - break; - default: - return -EOPNOTSUPP; - } - - memcpy(stlc->mac_addr, conf->mac_addr, ETH_ALEN); - - return 0; -} - -static void stlc45xx_op_remove_interface(struct ieee80211_hw *hw, - struct ieee80211_if_init_conf *conf) -{ - stlc45xx_debug(DEBUG_FUNC, "%s", __func__); -} - -static int stlc45xx_op_config(struct ieee80211_hw *hw, u32 changed) -{ - struct stlc45xx *stlc = hw->priv; - - stlc45xx_debug(DEBUG_FUNC, "%s", __func__); - - mutex_lock(&stlc->mutex); - - stlc->channel = hw->conf.channel->hw_value; - stlc45xx_tx_scan(stlc); - stlc45xx_tx_setup(stlc); - stlc45xx_tx_edcf(stlc); - - if ((hw->conf.flags & IEEE80211_CONF_PS) != stlc->psm) { - stlc->psm = hw->conf.flags & IEEE80211_CONF_PS; - if (stlc->associated) { - stlc45xx_tx_psm(stlc, stlc->psm); - stlc45xx_tx_nullfunc(stlc, stlc->psm); - } - } - - mutex_unlock(&stlc->mutex); - - return 0; -} - -static void stlc45xx_op_configure_filter(struct ieee80211_hw *hw, - unsigned int changed_flags, - unsigned int *total_flags, - int mc_count, - struct dev_addr_list *mc_list) -{ - *total_flags = 0; -} - -static void stlc45xx_op_bss_info_changed(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *info, - u32 changed) -{ - struct stlc45xx *stlc = hw->priv; - - stlc45xx_debug(DEBUG_FUNC, "%s", __func__); - mutex_lock(&stlc->mutex); - - memcpy(stlc->bssid, info->bssid, ETH_ALEN); - stlc45xx_tx_setup(stlc); - - mutex_unlock(&stlc->mutex); - - if (changed & BSS_CHANGED_ASSOC) { - stlc->associated = info->assoc; - if (info->assoc) - stlc->aid = info->aid; - else - stlc->aid = -1; - - if (stlc->psm) { - stlc45xx_tx_psm(stlc, stlc->psm); - stlc45xx_tx_nullfunc(stlc, stlc->psm); - } - } -} - - -/* can't be const, mac80211 writes to this */ -static struct ieee80211_rate stlc45xx_rates[] = { - { .bitrate = 10, .hw_value = 0, .hw_value_short = 0, }, - { .bitrate = 20, .hw_value = 1, .hw_value_short = 1, }, - { .bitrate = 55, .hw_value = 2, .hw_value_short = 2, }, - { .bitrate = 110, .hw_value = 3, .hw_value_short = 3, }, - { .bitrate = 60, .hw_value = 4, .hw_value_short = 4, }, - { .bitrate = 90, .hw_value = 5, .hw_value_short = 5, }, - { .bitrate = 120, .hw_value = 6, .hw_value_short = 6, }, - { .bitrate = 180, .hw_value = 7, .hw_value_short = 7, }, - { .bitrate = 240, .hw_value = 8, .hw_value_short = 8, }, - { .bitrate = 360, .hw_value = 9, .hw_value_short = 9, }, - { .bitrate = 480, .hw_value = 10, .hw_value_short = 10, }, - { .bitrate = 540, .hw_value = 11, .hw_value_short = 11, }, -}; - -/* can't be const, mac80211 writes to this */ -static struct ieee80211_channel stlc45xx_channels[] = { - { .hw_value = 1, .center_freq = 2412}, - { .hw_value = 2, 
.center_freq = 2417}, - { .hw_value = 3, .center_freq = 2422}, - { .hw_value = 4, .center_freq = 2427}, - { .hw_value = 5, .center_freq = 2432}, - { .hw_value = 6, .center_freq = 2437}, - { .hw_value = 7, .center_freq = 2442}, - { .hw_value = 8, .center_freq = 2447}, - { .hw_value = 9, .center_freq = 2452}, - { .hw_value = 10, .center_freq = 2457}, - { .hw_value = 11, .center_freq = 2462}, - { .hw_value = 12, .center_freq = 2467}, - { .hw_value = 13, .center_freq = 2472}, -}; - -/* can't be const, mac80211 writes to this */ -static struct ieee80211_supported_band stlc45xx_band_2ghz = { - .channels = stlc45xx_channels, - .n_channels = ARRAY_SIZE(stlc45xx_channels), - .bitrates = stlc45xx_rates, - .n_bitrates = ARRAY_SIZE(stlc45xx_rates), -}; - -static const struct ieee80211_ops stlc45xx_ops = { - .start = stlc45xx_op_start, - .stop = stlc45xx_op_stop, - .add_interface = stlc45xx_op_add_interface, - .remove_interface = stlc45xx_op_remove_interface, - .config = stlc45xx_op_config, - .configure_filter = stlc45xx_op_configure_filter, - .tx = stlc45xx_op_tx, - .bss_info_changed = stlc45xx_op_bss_info_changed, -}; - -static int stlc45xx_register_mac80211(struct stlc45xx *stlc) -{ - /* FIXME: SET_IEEE80211_PERM_ADDR() requires default_mac_addr - to be non-const for some strange reason */ - static u8 default_mac_addr[ETH_ALEN] = { - 0x00, 0x02, 0xee, 0xc0, 0xff, 0xee - }; - int ret; - - SET_IEEE80211_PERM_ADDR(stlc->hw, default_mac_addr); - - ret = ieee80211_register_hw(stlc->hw); - if (ret) { - stlc45xx_error("unable to register mac80211 hw: %d", ret); - return ret; - } - - return 0; -} - -static void stlc45xx_device_release(struct device *dev) -{ - -} - -static struct platform_device stlc45xx_device = { - .name = "stlc45xx", - .id = -1, - - /* device model insists to have a release function */ - .dev = { - .release = stlc45xx_device_release, - }, -}; - -static int __devinit stlc45xx_probe(struct spi_device *spi) -{ - struct stlc45xx *stlc; - struct ieee80211_hw *hw; - int ret; - - stlc45xx_debug(DEBUG_FUNC, "%s", __func__); - - /* mac80211 alloc */ - hw = ieee80211_alloc_hw(sizeof(*stlc), &stlc45xx_ops); - if (!hw) { - stlc45xx_error("could not alloc ieee80211_hw"); - ret = -ENOMEM; - goto out; - } - - /* mac80211 clears hw->priv */ - stlc = hw->priv; - - stlc->hw = hw; - dev_set_drvdata(&spi->dev, stlc); - stlc->spi = spi; - - spi->bits_per_word = 16; - spi->max_speed_hz = 24000000; - - ret = spi_setup(spi); - if (ret < 0) - stlc45xx_error("spi_setup failed"); - - ret = gpio_request(stlc45xx_gpio_power, "stlc45xx power"); - if (ret < 0) { - stlc45xx_error("power GPIO request failed: %d", ret); - return ret; - } - - ret = gpio_request(stlc45xx_gpio_irq, "stlc45xx irq"); - if (ret < 0) { - stlc45xx_error("irq GPIO request failed: %d", ret); - goto out; - } - - gpio_direction_output(stlc45xx_gpio_power, 0); - gpio_direction_input(stlc45xx_gpio_irq); - - ret = request_irq(gpio_to_irq(stlc45xx_gpio_irq), - stlc45xx_interrupt, IRQF_DISABLED, "stlc45xx", - stlc->spi); - if (ret < 0) - /* FIXME: handle the error */ - stlc45xx_error("request_irq() failed"); - - set_irq_type(gpio_to_irq(stlc45xx_gpio_irq), - IRQ_TYPE_EDGE_RISING); - - disable_irq(gpio_to_irq(stlc45xx_gpio_irq)); - - ret = platform_device_register(&stlc45xx_device); - if (ret) { - stlc45xx_error("Couldn't register wlan_omap device."); - return ret; - } - dev_set_drvdata(&stlc45xx_device.dev, stlc); - - INIT_WORK(&stlc->work, stlc45xx_work); - INIT_WORK(&stlc->work_reset, stlc45xx_work_reset); - INIT_DELAYED_WORK(&stlc->work_tx_timeout, 
stlc45xx_work_tx_timeout); - mutex_init(&stlc->mutex); - init_completion(&stlc->fw_comp); - spin_lock_init(&stlc->tx_lock); - INIT_LIST_HEAD(&stlc->txbuffer); - INIT_LIST_HEAD(&stlc->tx_pending); - INIT_LIST_HEAD(&stlc->tx_sent); - - hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | - IEEE80211_HW_SIGNAL_DBM | - IEEE80211_HW_NOISE_DBM; - /* four bytes for padding */ - hw->extra_tx_headroom = sizeof(struct s_lm_data_out) + 4; - - /* unit us */ - hw->channel_change_time = 1000; - - hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); - hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &stlc45xx_band_2ghz; - - SET_IEEE80211_DEV(hw, &spi->dev); - - BUILD_BUG_ON(sizeof(default_cal_rssi) != RSSI_CAL_ARRAY_LEN); - BUILD_BUG_ON(sizeof(default_cal_channels) != CHANNEL_CAL_ARRAY_LEN); - - stlc->cal_rssi = kmemdup(default_cal_rssi, RSSI_CAL_ARRAY_LEN, - GFP_KERNEL); - stlc->cal_channels = kmemdup(default_cal_channels, - CHANNEL_CAL_ARRAY_LEN, - GFP_KERNEL); - - ret = device_create_file(&stlc45xx_device.dev, &dev_attr_cal_rssi); - if (ret < 0) { - stlc45xx_error("failed to create sysfs file cal_rssi"); - goto out; - } - - ret = device_create_file(&stlc45xx_device.dev, &dev_attr_cal_channels); - if (ret < 0) { - stlc45xx_error("failed to create sysfs file cal_channels"); - goto out; - } - - ret = device_create_file(&stlc45xx_device.dev, &dev_attr_tx_buf); - if (ret < 0) { - stlc45xx_error("failed to create sysfs file tx_buf"); - goto out; - } - - ret = stlc45xx_register_mac80211(stlc); - if (ret < 0) - goto out; - - stlc45xx_info("v" DRIVER_VERSION " loaded"); - - stlc45xx_info("config buffer 0x%x-0x%x", - FIRMWARE_CONFIG_START, FIRMWARE_CONFIG_END); - stlc45xx_info("tx 0x%x-0x%x, rx 0x%x-0x%x", - FIRMWARE_TXBUFFER_START, FIRMWARE_TXBUFFER_END, - FIRMWARE_RXBUFFER_START, FIRMWARE_RXBUFFER_END); - -out: - return ret; -} - -static int __devexit stlc45xx_remove(struct spi_device *spi) -{ - struct stlc45xx *stlc = dev_get_drvdata(&spi->dev); - - stlc45xx_debug(DEBUG_FUNC, "%s", __func__); - - platform_device_unregister(&stlc45xx_device); - - ieee80211_unregister_hw(stlc->hw); - - free_irq(gpio_to_irq(stlc45xx_gpio_irq), spi); - - gpio_free(stlc45xx_gpio_power); - gpio_free(stlc45xx_gpio_irq); - - /* FIXME: free cal_channels and cal_rssi? 
*/ - - kfree(stlc->fw); - - mutex_destroy(&stlc->mutex); - - /* frees also stlc */ - ieee80211_free_hw(stlc->hw); - stlc = NULL; - - return 0; -} - - -static struct spi_driver stlc45xx_spi_driver = { - .driver = { - /* use cx3110x name because board-n800.c uses that for the - * SPI port */ - .name = "cx3110x", - .bus = &spi_bus_type, - .owner = THIS_MODULE, - }, - - .probe = stlc45xx_probe, - .remove = __devexit_p(stlc45xx_remove), -}; - -static int __init stlc45xx_init(void) -{ - int ret; - - stlc45xx_debug(DEBUG_FUNC, "%s", __func__); - - ret = spi_register_driver(&stlc45xx_spi_driver); - if (ret < 0) { - stlc45xx_error("failed to register SPI driver: %d", ret); - goto out; - } - -out: - return ret; -} - -static void __exit stlc45xx_exit(void) -{ - stlc45xx_debug(DEBUG_FUNC, "%s", __func__); - - spi_unregister_driver(&stlc45xx_spi_driver); - - stlc45xx_info("unloaded"); -} - -module_init(stlc45xx_init); -module_exit(stlc45xx_exit); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Kalle Valo <kalle.valo@nokia.com>"); -MODULE_ALIAS("spi:cx3110x"); diff --git a/drivers/staging/stlc45xx/stlc45xx.h b/drivers/staging/stlc45xx/stlc45xx.h deleted file mode 100644 index ac96bbbde79..00000000000 --- a/drivers/staging/stlc45xx/stlc45xx.h +++ /dev/null @@ -1,283 +0,0 @@ -/* - * This file is part of stlc45xx - * - * Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies). - * - * Contact: Kalle Valo <kalle.valo@nokia.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA - * 02110-1301 USA - * - */ - -#include <linux/mutex.h> -#include <linux/list.h> -#include <net/mac80211.h> - -#include "stlc45xx_lmac.h" - -#define DRIVER_NAME "stlc45xx" -#define DRIVER_VERSION "0.1.3" - -#define DRIVER_PREFIX DRIVER_NAME ": " - -enum { - DEBUG_NONE = 0, - DEBUG_FUNC = 1 << 0, - DEBUG_IRQ = 1 << 1, - DEBUG_BH = 1 << 2, - DEBUG_RX = 1 << 3, - DEBUG_RX_CONTENT = 1 << 5, - DEBUG_TX = 1 << 6, - DEBUG_TX_CONTENT = 1 << 8, - DEBUG_TXBUFFER = 1 << 9, - DEBUG_QUEUE = 1 << 10, - DEBUG_BOOT = 1 << 11, - DEBUG_PSM = 1 << 12, - DEBUG_ALL = ~0, -}; - -#define DEBUG_LEVEL DEBUG_NONE -/* #define DEBUG_LEVEL DEBUG_ALL */ -/* #define DEBUG_LEVEL (DEBUG_TX | DEBUG_RX | DEBUG_IRQ) */ -/* #define DEBUG_LEVEL (DEBUG_TX | DEBUG_MEMREGION | DEBUG_QUEUE) */ -/* #define DEBUG_LEVEL (DEBUG_MEMREGION | DEBUG_QUEUE) */ - -#define stlc45xx_error(fmt, arg...) \ - printk(KERN_ERR DRIVER_PREFIX "ERROR " fmt "\n", ##arg) - -#define stlc45xx_warning(fmt, arg...) \ - printk(KERN_WARNING DRIVER_PREFIX "WARNING " fmt "\n", ##arg) - -#define stlc45xx_info(fmt, arg...) \ - printk(KERN_INFO DRIVER_PREFIX fmt "\n", ##arg) - -#define stlc45xx_debug(level, fmt, arg...) 
\ - do { \ - if (level & DEBUG_LEVEL) \ - printk(KERN_DEBUG DRIVER_PREFIX fmt "\n", ##arg); \ - } while (0) - -#define stlc45xx_dump(level, buf, len) \ - do { \ - if (level & DEBUG_LEVEL) \ - print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, \ - 16, 1, buf, len, 1); \ - } while (0) - -#define MAC2STR(a) ((a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]) -#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x" - -/* Bit 15 is read/write bit; ON = READ, OFF = WRITE */ -#define ADDR_READ_BIT_15 0x8000 - -#define SPI_ADRS_ARM_INTERRUPTS 0x00 -#define SPI_ADRS_ARM_INT_EN 0x04 - -#define SPI_ADRS_HOST_INTERRUPTS 0x08 -#define SPI_ADRS_HOST_INT_EN 0x0c -#define SPI_ADRS_HOST_INT_ACK 0x10 - -#define SPI_ADRS_GEN_PURP_1 0x14 -#define SPI_ADRS_GEN_PURP_2 0x18 - -/* high word */ -#define SPI_ADRS_DEV_CTRL_STAT 0x26 - -#define SPI_ADRS_DMA_DATA 0x28 - -#define SPI_ADRS_DMA_WRITE_CTRL 0x2c -#define SPI_ADRS_DMA_WRITE_LEN 0x2e -#define SPI_ADRS_DMA_WRITE_BASE 0x30 - -#define SPI_ADRS_DMA_READ_CTRL 0x34 -#define SPI_ADRS_DMA_READ_LEN 0x36 -#define SPI_ADRS_DMA_READ_BASE 0x38 - -#define SPI_CTRL_STAT_HOST_OVERRIDE 0x8000 -#define SPI_CTRL_STAT_START_HALTED 0x4000 -#define SPI_CTRL_STAT_RAM_BOOT 0x2000 -#define SPI_CTRL_STAT_HOST_RESET 0x1000 -#define SPI_CTRL_STAT_HOST_CPU_EN 0x0800 - -#define SPI_DMA_WRITE_CTRL_ENABLE 0x0001 -#define SPI_DMA_READ_CTRL_ENABLE 0x0001 -#define HOST_ALLOWED (1 << 7) - -#define FIRMWARE_ADDRESS 0x20000 - -#define SPI_TIMEOUT 100 /* msec */ - -#define SPI_MAX_TX_PACKETS 32 - -#define SPI_MAX_PACKET_SIZE 32767 - -#define SPI_TARGET_INT_WAKEUP 0x00000001 -#define SPI_TARGET_INT_SLEEP 0x00000002 -#define SPI_TARGET_INT_RDDONE 0x00000004 - -#define SPI_TARGET_INT_CTS 0x00004000 -#define SPI_TARGET_INT_DR 0x00008000 - -#define SPI_HOST_INT_READY 0x00000001 -#define SPI_HOST_INT_WR_READY 0x00000002 -#define SPI_HOST_INT_SW_UPDATE 0x00000004 -#define SPI_HOST_INT_UPDATE 0x10000000 - -/* clear to send */ -#define SPI_HOST_INT_CTS 0x00004000 - -/* data ready */ -#define SPI_HOST_INT_DR 0x00008000 - -#define SPI_HOST_INTS_DEFAULT \ - (SPI_HOST_INT_READY | SPI_HOST_INT_UPDATE | SPI_HOST_INT_SW_UPDATE) - -#define TARGET_BOOT_SLEEP 50 - -/* The firmware buffer is divided into three areas: - * - * o config area (for control commands) - * o tx buffer - * o rx buffer - */ -#define FIRMWARE_BUFFER_START 0x20200 -#define FIRMWARE_BUFFER_END 0x27c60 -#define FIRMWARE_BUFFER_LEN (FIRMWARE_BUFFER_END - FIRMWARE_BUFFER_START) -#define FIRMWARE_MTU 3240 -#define FIRMWARE_CONFIG_PAYLOAD_LEN 1024 -#define FIRMWARE_CONFIG_START FIRMWARE_BUFFER_START -#define FIRMWARE_CONFIG_LEN (sizeof(struct s_lm_control) + \ - FIRMWARE_CONFIG_PAYLOAD_LEN) -#define FIRMWARE_CONFIG_END (FIRMWARE_CONFIG_START + FIRMWARE_CONFIG_LEN - 1) -#define FIRMWARE_RXBUFFER_LEN (5 * FIRMWARE_MTU + 1024) -#define FIRMWARE_RXBUFFER_START (FIRMWARE_BUFFER_END - FIRMWARE_RXBUFFER_LEN) -#define FIRMWARE_RXBUFFER_END (FIRMWARE_RXBUFFER_START + \ - FIRMWARE_RXBUFFER_LEN - 1) -#define FIRMWARE_TXBUFFER_START (FIRMWARE_BUFFER_START + FIRMWARE_CONFIG_LEN) -#define FIRMWARE_TXBUFFER_LEN (FIRMWARE_BUFFER_LEN - FIRMWARE_CONFIG_LEN - \ - FIRMWARE_RXBUFFER_LEN) -#define FIRMWARE_TXBUFFER_END (FIRMWARE_TXBUFFER_START + \ - FIRMWARE_TXBUFFER_LEN - 1) - -#define FIRMWARE_TXBUFFER_HEADER 100 -#define FIRMWARE_TXBUFFER_TRAILER 4 - -/* FIXME: come up with a proper value */ -#define MAX_FRAME_LEN 2500 - -/* unit is ms */ -#define TX_FRAME_LIFETIME 2000 -#define TX_TIMEOUT 4000 - -#define SUPPORTED_CHANNELS 13 - -/* FIXME */ -/* #define CHANNEL_CAL_LEN 
offsetof(struct s_lmo_scan, bratemask) - \ */ -/* offsetof(struct s_lmo_scan, channel) */ -#define CHANNEL_CAL_LEN 292 -#define CHANNEL_CAL_ARRAY_LEN (SUPPORTED_CHANNELS * CHANNEL_CAL_LEN) -/* FIXME */ -/* #define RSSI_CAL_LEN sizeof(struct s_lmo_scan) - \ */ -/* offsetof(struct s_lmo_scan, rssical) */ -#define RSSI_CAL_LEN 8 -#define RSSI_CAL_ARRAY_LEN (SUPPORTED_CHANNELS * RSSI_CAL_LEN) - -struct s_dma_regs { - unsigned short cmd; - unsigned short len; - unsigned long addr; -}; - -struct stlc45xx_ie_tim { - u8 dtim_count; - u8 dtim_period; - u8 bmap_control; - u8 pvbmap[251]; -}; - -struct txbuffer { - /* can be removed when switched to skb queue */ - struct list_head tx_list; - - struct list_head buffer_list; - - int start; - int frame_start; - int end; - - struct sk_buff *skb; - u32 handle; - - bool status_needed; - - int header_len; - - /* unit jiffies */ - unsigned long lifetime; -}; - -enum fw_state { - FW_STATE_OFF, - FW_STATE_BOOTING, - FW_STATE_READY, - FW_STATE_RESET, - FW_STATE_RESETTING, -}; - -struct stlc45xx { - struct ieee80211_hw *hw; - struct spi_device *spi; - struct work_struct work; - struct work_struct work_reset; - struct delayed_work work_tx_timeout; - struct mutex mutex; - struct completion fw_comp; - - - u8 bssid[ETH_ALEN]; - u8 mac_addr[ETH_ALEN]; - int channel; - - u8 *cal_rssi; - u8 *cal_channels; - - enum fw_state fw_state; - - spinlock_t tx_lock; - - /* protected by tx_lock */ - struct list_head txbuffer; - - /* protected by tx_lock */ - struct list_head tx_pending; - - /* protected by tx_lock */ - int tx_queue_stopped; - - /* protected by mutex */ - struct list_head tx_sent; - - int tx_frames; - - u8 *fw; - int fw_len; - - bool psm; - bool associated; - int aid; - bool pspolling; -}; - - diff --git a/drivers/staging/stlc45xx/stlc45xx_lmac.h b/drivers/staging/stlc45xx/stlc45xx_lmac.h deleted file mode 100644 index af5db801347..00000000000 --- a/drivers/staging/stlc45xx/stlc45xx_lmac.h +++ /dev/null @@ -1,434 +0,0 @@ -/************************************************************************ -* This is the LMAC API interface header file for STLC4560. * -* Copyright (C) 2007 Conexant Systems, Inc. * -* This program is free software; you can redistribute it and/or * -* modify it under the terms of the GNU General Public License * -* as published by the Free Software Foundation; either version 2 * -* of the License, or (at your option) any later version. * -* * -* This program is distributed in the hope that it will be useful, * -* but WITHOUT ANY WARRANTY; without even the implied warranty of * -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * -* GNU General Public License for more details. * -* * -* You should have received a copy of the GNU General Public License * -* along with this program. 
If not, see <http://www.gnu.org/licenses/>.* -*************************************************************************/ - -#ifndef __lmac_h__ -#define __lmac_h__ - -#define LM_TOP_VARIANT 0x0506 -#define LM_BOTTOM_VARIANT 0x0506 - -/* - * LMAC - UMAC interface definition: - */ - -#define LM_FLAG_CONTROL 0x8000 -#define LM_FLAG_ALIGN 0x4000 - -#define LM_CTRL_OPSET 0x0001 - -#define LM_OUT_PROMISC 0x0001 -#define LM_OUT_TIMESTAMP 0x0002 -#define LM_OUT_SEQNR 0x0004 -#define LM_OUT_BURST 0x0010 -#define LM_OUT_NOCANCEL 0x0020 -#define LM_OUT_CLEARTIM 0x0040 -#define LM_OUT_HITCHHIKE 0x0080 -#define LM_OUT_COMPRESS 0x0100 -#define LM_OUT_CONCAT 0x0200 -#define LM_OUT_PCS_ACCEPT 0x0400 -#define LM_OUT_WAITEOSP 0x0800 - - -#define LM_ALOFT_SP 0x10 -#define LM_ALOFT_CTS 0x20 -#define LM_ALOFT_RTS 0x40 -#define LM_ALOFT_MASK 0x1f -#define LM_ALOFT_RATE 0x0f - -#define LM_IN_FCS_GOOD 0x0001 -#define LM_IN_MATCH_MAC 0x0002 -#define LM_IN_MCBC 0x0004 -#define LM_IN_BEACON 0x0008 -#define LM_IN_MATCH_BSS 0x0010 -#define LM_IN_BCAST_BSS 0x0020 -#define LM_IN_DATA 0x0040 -#define LM_IN_TRUNCATED 0x0080 - -#define LM_IN_TRANSPARENT 0x0200 - -#define LM_QUEUE_BEACON 0 -#define LM_QUEUE_SCAN 1 -#define LM_QUEUE_MGT 2 -#define LM_QUEUE_MCBC 3 -#define LM_QUEUE_DATA 4 -#define LM_QUEUE_DATA0 4 -#define LM_QUEUE_DATA1 5 -#define LM_QUEUE_DATA2 6 -#define LM_QUEUE_DATA3 7 - -#define LM_SETUP_INFRA 0x0001 -#define LM_SETUP_IBSS 0x0002 -#define LM_SETUP_TRANSPARENT 0x0008 -#define LM_SETUP_PROMISCUOUS 0x0010 -#define LM_SETUP_HIBERNATE 0x0020 -#define LM_SETUP_NOACK 0x0040 -#define LM_SETUP_RX_DISABLED 0x0080 - -#define LM_ANTENNA_0 0 -#define LM_ANTENNA_1 1 -#define LM_ANTENNA_DIVERSITY 2 - -#define LM_TX_FAILED 0x0001 -#define LM_TX_PSM 0x0002 -#define LM_TX_PSM_CANCELLED 0x0004 - -#define LM_SCAN_EXIT 0x0001 -#define LM_SCAN_TRAP 0x0002 -#define LM_SCAN_ACTIVE 0x0004 -#define LM_SCAN_FILTER 0x0008 - -#define LM_PSM 0x0001 -#define LM_PSM_DTIM 0x0002 -#define LM_PSM_MCBC 0x0004 -#define LM_PSM_CHECKSUM 0x0008 -#define LM_PSM_SKIP_MORE_DATA 0x0010 -#define LM_PSM_BEACON_TIMEOUT 0x0020 -#define LM_PSM_HFOSLEEP 0x0040 -#define LM_PSM_AUTOSWITCH_SLEEP 0x0080 -#define LM_PSM_LPIT 0x0100 -#define LM_PSM_BF_UCAST_SKIP 0x0200 -#define LM_PSM_BF_MCAST_SKIP 0x0400 - -/* hfosleep */ -#define LM_PSM_SLEEP_OPTION_MASK (LM_PSM_AUTOSWITCH_SLEEP | LM_PSM_HFOSLEEP) -#define LM_PSM_SLEEP_OPTION_SHIFT 6 -/* hfosleepend */ -#define LM_PSM_BF_OPTION_MASK (LM_PSM_BF_MCAST_SKIP | LM_PSM_BF_UCAST_SKIP) -#define LM_PSM_BF_OPTION_SHIFT 9 - - -#define LM_PRIVACC_WEP 0x01 -#define LM_PRIVACC_TKIP 0x02 -#define LM_PRIVACC_MICHAEL 0x04 -#define LM_PRIVACC_CCX_KP 0x08 -#define LM_PRIVACC_CCX_MIC 0x10 -#define LM_PRIVACC_AES_CCMP 0x20 - -/* size of s_lm_descr in words */ -#define LM_DESCR_SIZE_WORDS 11 - -#ifndef __ASSEMBLER__ - -enum { - LM_MODE_CLIENT = 0, - LM_MODE_AP -}; - -struct s_lm_descr { - uint16_t modes; - uint16_t flags; - uint32_t buffer_start; - uint32_t buffer_end; - uint8_t header; - uint8_t trailer; - uint8_t tx_queues; - uint8_t tx_depth; - uint8_t privacy; - uint8_t rx_keycache; - uint8_t tim_size; - uint8_t pad1; - uint8_t rates[16]; - uint32_t link; - uint16_t mtu; -}; - - -struct s_lm_control { - uint16_t flags; - uint16_t length; - uint32_t handle; - uint16_t oid; - uint16_t pad; - /* uint8_t data[]; */ -}; - -enum { - LM_PRIV_NONE = 0, - LM_PRIV_WEP, - LM_PRIV_TKIP, - LM_PRIV_TKIPMICHAEL, - LM_PRIV_CCX_WEPMIC, - LM_PRIV_CCX_KPMIC, - LM_PRIV_CCX_KP, - LM_PRIV_AES_CCMP -}; - -enum { - LM_DECRYPT_NONE, - LM_DECRYPT_OK, 
- LM_DECRYPT_NOKEY, - LM_DECRYPT_NOMICHAEL, - LM_DECRYPT_NOCKIPMIC, - LM_DECRYPT_FAIL_WEP, - LM_DECRYPT_FAIL_TKIP, - LM_DECRYPT_FAIL_MICHAEL, - LM_DECRYPT_FAIL_CKIPKP, - LM_DECRYPT_FAIL_CKIPMIC, - LM_DECRYPT_FAIL_AESCCMP -}; - -struct s_lm_data_out { - uint16_t flags; - uint16_t length; - uint32_t handle; - uint16_t aid; - uint8_t rts_retries; - uint8_t retries; - uint8_t aloft[8]; - uint8_t aloft_ctrl; - uint8_t crypt_offset; - uint8_t keytype; - uint8_t keylen; - uint8_t key[16]; - uint8_t queue; - uint8_t backlog; - uint16_t durations[4]; - uint8_t antenna; - uint8_t cts; - int16_t power; - uint8_t pad[2]; - /*uint8_t data[];*/ -}; - -#define LM_RCPI_INVALID (0xff) - -struct s_lm_data_in { - uint16_t flags; - uint16_t length; - uint16_t frequency; - uint8_t antenna; - uint8_t rate; - uint8_t rcpi; - uint8_t sq; - uint8_t decrypt; - uint8_t rssi_raw; - uint32_t clock[2]; - /*uint8_t data[];*/ -}; - -union u_lm_data { - struct s_lm_data_out out; - struct s_lm_data_in in; -}; - -enum { - LM_OID_SETUP = 0, - LM_OID_SCAN = 1, - LM_OID_TRAP = 2, - LM_OID_EDCF = 3, - LM_OID_KEYCACHE = 4, - LM_OID_PSM = 6, - LM_OID_TXCANCEL = 7, - LM_OID_TX = 8, - LM_OID_BURST = 9, - LM_OID_STATS = 10, - LM_OID_LED = 13, - LM_OID_TIMER = 15, - LM_OID_NAV = 20, - LM_OID_PCS = 22, - LM_OID_BT_BALANCER = 28, - LM_OID_GROUP_ADDRESS_TABLE = 30, - LM_OID_ARPTABLE = 31, - LM_OID_BT_OPTIONS = 35 -}; - -enum { - LM_FRONTEND_UNKNOWN = 0, - LM_FRONTEND_DUETTE3, - LM_FRONTEND_DUETTE2, - LM_FRONTEND_FRISBEE, - LM_FRONTEND_CROSSBOW, - LM_FRONTEND_LONGBOW -}; - - -#define INVALID_LPF_BANDWIDTH 0xffff -#define INVALID_OSC_START_DELAY 0xffff - -struct s_lmo_setup { - uint16_t flags; - uint8_t macaddr[6]; - uint8_t bssid[6]; - uint8_t antenna; - uint8_t rx_align; - uint32_t rx_buffer; - uint16_t rx_mtu; - uint16_t frontend; - uint16_t timeout; - uint16_t truncate; - uint32_t bratemask; - uint8_t sbss_offset; - uint8_t mcast_window; - uint8_t rx_rssi_threshold; - uint8_t rx_ed_threshold; - uint32_t ref_clock; - uint16_t lpf_bandwidth; - uint16_t osc_start_delay; -}; - - -struct s_lmo_scan { - uint16_t flags; - uint16_t dwell; - uint8_t channel[292]; - uint32_t bratemask; - uint8_t aloft[8]; - uint8_t rssical[8]; -}; - - -enum { - LM_TRAP_SCAN = 0, - LM_TRAP_TIMER, - LM_TRAP_BEACON_TX, - LM_TRAP_FAA_RADIO_ON, - LM_TRAP_FAA_RADIO_OFF, - LM_TRAP_RADAR, - LM_TRAP_NO_BEACON, - LM_TRAP_TBTT, - LM_TRAP_SCO_ENTER, - LM_TRAP_SCO_EXIT -}; - -struct s_lmo_trap { - uint16_t event; - uint16_t frequency; -}; - -struct s_lmo_timer { - uint32_t interval; -}; - -struct s_lmo_nav { - uint32_t period; -}; - - -struct s_lmo_edcf_queue; - -struct s_lmo_edcf { - uint8_t flags; - uint8_t slottime; - uint8_t sifs; - uint8_t eofpad; - struct s_lmo_edcf_queue { - uint8_t aifs; - uint8_t pad0; - uint16_t cwmin; - uint16_t cwmax; - uint16_t txop; - } queues[8]; - uint8_t mapping[4]; - uint16_t maxburst; - uint16_t round_trip_delay; -}; - -struct s_lmo_keycache { - uint8_t entry; - uint8_t keyid; - uint8_t address[6]; - uint8_t pad[2]; - uint8_t keytype; - uint8_t keylen; - uint8_t key[24]; -}; - - -struct s_lm_interval; - -struct s_lmo_psm { - uint16_t flags; - uint16_t aid; - struct s_lm_interval { - uint16_t interval; - uint16_t periods; - } intervals[4]; - /* uint16_t pad; */ - uint8_t beacon_rcpi_skip_max; - uint8_t rcpi_delta_threshold; - uint8_t nr; - uint8_t exclude[1]; -}; - -#define MC_FILTER_ADDRESS_NUM 4 - -struct s_lmo_group_address_table { - uint16_t filter_enable; - uint16_t num_address; - uint8_t macaddr_list[MC_FILTER_ADDRESS_NUM][6]; -}; - 
-struct s_lmo_txcancel { - uint32_t address[1]; -}; - - -struct s_lmo_tx { - uint8_t flags; - uint8_t retries; - uint8_t rcpi; - uint8_t sq; - uint16_t seqctrl; - uint8_t antenna; - uint8_t pad; -}; - -struct s_lmo_burst { - uint8_t flags; - uint8_t queue; - uint8_t backlog; - uint8_t pad; - uint16_t durations[32]; -}; - -struct s_lmo_stats { - uint32_t valid; - uint32_t fcs; - uint32_t abort; - uint32_t phyabort; - uint32_t rts_success; - uint32_t rts_fail; - uint32_t timestamp; - uint32_t time_tx; - uint32_t noisefloor; - uint32_t sample_noise[8]; - uint32_t sample_cca; - uint32_t sample_tx; -}; - - -struct s_lmo_led { - uint16_t flags; - uint16_t mask[2]; - uint16_t delay/*[2]*/; -}; - - -struct s_lmo_bt_balancer { - uint16_t prio_thresh; - uint16_t acl_thresh; -}; - - -struct s_lmo_arp_table { - uint16_t filter_enable; - uint32_t ipaddr; -}; - -#endif /* __ASSEMBLER__ */ - -#endif /* __lmac_h__ */ diff --git a/drivers/staging/vme/bridges/vme_ca91cx42.c b/drivers/staging/vme/bridges/vme_ca91cx42.c index 3d2a84c4582..e139eaeaa17 100644 --- a/drivers/staging/vme/bridges/vme_ca91cx42.c +++ b/drivers/staging/vme/bridges/vme_ca91cx42.c @@ -25,6 +25,7 @@ #include <linux/poll.h> #include <linux/interrupt.h> #include <linux/spinlock.h> +#include <linux/sched.h> #include <asm/time.h> #include <asm/io.h> #include <asm/uaccess.h> diff --git a/drivers/staging/vme/bridges/vme_tsi148.c b/drivers/staging/vme/bridges/vme_tsi148.c index 8960fa9ee7a..00fe0803c21 100644 --- a/drivers/staging/vme/bridges/vme_tsi148.c +++ b/drivers/staging/vme/bridges/vme_tsi148.c @@ -25,6 +25,7 @@ #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/spinlock.h> +#include <linux/sched.h> #include <asm/time.h> #include <asm/io.h> #include <asm/uaccess.h> diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c index 7f96bcaf1c6..05186110c02 100644 --- a/drivers/staging/vt6656/main_usb.c +++ b/drivers/staging/vt6656/main_usb.c @@ -1332,7 +1332,6 @@ device_release_WPADEV(pDevice); free_netdev(pDevice->dev); } - kfree(pDevice); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_disconnect3.. \n"); } diff --git a/drivers/staging/winbond/Kconfig b/drivers/staging/winbond/Kconfig index 940460c39f3..132671d96d0 100644 --- a/drivers/staging/winbond/Kconfig +++ b/drivers/staging/winbond/Kconfig @@ -1,6 +1,6 @@ config W35UND tristate "IS89C35 WLAN USB driver" - depends on MAC80211 && WLAN_80211 && USB && EXPERIMENTAL + depends on MAC80211 && WLAN && USB && EXPERIMENTAL default n ---help--- This is highly experimental driver for Winbond WIFI card. diff --git a/drivers/staging/wlan-ng/Kconfig b/drivers/staging/wlan-ng/Kconfig index 9959b658c8c..f44294b0d8d 100644 --- a/drivers/staging/wlan-ng/Kconfig +++ b/drivers/staging/wlan-ng/Kconfig @@ -1,6 +1,6 @@ config PRISM2_USB tristate "Prism2.5/3 USB driver" - depends on WLAN_80211 && USB && WIRELESS_EXT + depends on WLAN && USB && WIRELESS_EXT default n ---help--- This is the wlan-ng prism 2.5/3 USB driver for a wide range of diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index 33351312327..a18e3c5dd82 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -223,6 +223,7 @@ config USB_OTG config USB_GADGET_PXA25X boolean "PXA 25x or IXP 4xx" depends on (ARCH_PXA && PXA25x) || ARCH_IXP4XX + select USB_OTG_UTILS help Intel's PXA 25x series XScale ARM-5TE processors include an integrated full speed USB 1.1 device controller. 
The diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c index f37de283d0a..167cb2a8ece 100644 --- a/drivers/usb/gadget/ether.c +++ b/drivers/usb/gadget/ether.c @@ -61,11 +61,6 @@ * simpler, Microsoft pushes their own approach: RNDIS. The published * RNDIS specs are ambiguous and appear to be incomplete, and are also * needlessly complex. They borrow more from CDC ACM than CDC ECM. - * - * While CDC ECM, CDC Subset, and RNDIS are designed to extend the ethernet - * interface to the target, CDC EEM was designed to use ethernet over the USB - * link between the host and target. CDC EEM is implemented as an alternative - * to those other protocols when that communication model is more appropriate */ #define DRIVER_DESC "Ethernet Gadget" @@ -157,8 +152,8 @@ static inline bool has_rndis(void) #define RNDIS_PRODUCT_NUM 0xa4a2 /* Ethernet/RNDIS Gadget */ /* For EEM gadgets */ -#define EEM_VENDOR_NUM 0x0525 /* INVALID - NEEDS TO BE ALLOCATED */ -#define EEM_PRODUCT_NUM 0xa4a1 /* INVALID - NEEDS TO BE ALLOCATED */ +#define EEM_VENDOR_NUM 0x1d6b /* Linux Foundation */ +#define EEM_PRODUCT_NUM 0x0102 /* EEM Gadget */ /*-------------------------------------------------------------------------*/ diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c index 42a74b8a0bb..fa3d142ba64 100644 --- a/drivers/usb/gadget/fsl_udc_core.c +++ b/drivers/usb/gadget/fsl_udc_core.c @@ -2139,7 +2139,7 @@ static int fsl_proc_read(char *page, char **start, off_t off, int count, static void fsl_udc_release(struct device *dev) { complete(udc_controller->done); - dma_free_coherent(dev, udc_controller->ep_qh_size, + dma_free_coherent(dev->parent, udc_controller->ep_qh_size, udc_controller->ep_qh, udc_controller->ep_qh_dma); kfree(udc_controller); } diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index 3efa59b1804..b25cdea93a1 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -1400,6 +1400,10 @@ iso_stream_schedule ( goto fail; } + period = urb->interval; + if (!stream->highspeed) + period <<= 3; + now = ehci_readl(ehci, &ehci->regs->frame_index) % mod; /* when's the last uframe this urb could start? */ @@ -1417,8 +1421,8 @@ iso_stream_schedule ( /* Fell behind (by up to twice the slop amount)? */ if (start >= max - 2 * 8 * SCHEDULE_SLOP) - start += stream->interval * DIV_ROUND_UP( - max - start, stream->interval) - mod; + start += period * DIV_ROUND_UP( + max - start, period) - mod; /* Tried to schedule too far into the future? 
*/ if (unlikely((start + sched->span) >= max)) { @@ -1441,10 +1445,6 @@ iso_stream_schedule ( /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */ - period = urb->interval; - if (!stream->highspeed) - period <<= 3; - /* find a uframe slot with enough bandwidth */ for (; start < (stream->next_uframe + period); start++) { int enough_space; diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c index 749b5374282..e33d3625635 100644 --- a/drivers/usb/host/r8a66597-hcd.c +++ b/drivers/usb/host/r8a66597-hcd.c @@ -1003,19 +1003,20 @@ static void r8a66597_check_syssts(struct r8a66597 *r8a66597, int port, if (syssts == SE0) { r8a66597_write(r8a66597, ~ATTCH, get_intsts_reg(port)); r8a66597_bset(r8a66597, ATTCHE, get_intenb_reg(port)); - return; - } + } else { + if (syssts == FS_JSTS) + r8a66597_bset(r8a66597, HSE, get_syscfg_reg(port)); + else if (syssts == LS_JSTS) + r8a66597_bclr(r8a66597, HSE, get_syscfg_reg(port)); - if (syssts == FS_JSTS) - r8a66597_bset(r8a66597, HSE, get_syscfg_reg(port)); - else if (syssts == LS_JSTS) - r8a66597_bclr(r8a66597, HSE, get_syscfg_reg(port)); + r8a66597_write(r8a66597, ~DTCH, get_intsts_reg(port)); + r8a66597_bset(r8a66597, DTCHE, get_intenb_reg(port)); - r8a66597_write(r8a66597, ~DTCH, get_intsts_reg(port)); - r8a66597_bset(r8a66597, DTCHE, get_intenb_reg(port)); + if (r8a66597->bus_suspended) + usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597)); + } - if (r8a66597->bus_suspended) - usb_hcd_resume_root_hub(r8a66597_to_hcd(r8a66597)); + usb_hcd_poll_rh_status(r8a66597_to_hcd(r8a66597)); } /* this function must be called with interrupt disabled */ @@ -1024,6 +1025,8 @@ static void r8a66597_usb_connect(struct r8a66597 *r8a66597, int port) u16 speed = get_rh_usb_speed(r8a66597, port); struct r8a66597_root_hub *rh = &r8a66597->root_hub[port]; + rh->port &= ~((1 << USB_PORT_FEAT_HIGHSPEED) | + (1 << USB_PORT_FEAT_LOWSPEED)); if (speed == HSMODE) rh->port |= (1 << USB_PORT_FEAT_HIGHSPEED); else if (speed == LSMODE) diff --git a/drivers/usb/host/whci/asl.c b/drivers/usb/host/whci/asl.c index c632437c764..562eba10881 100644 --- a/drivers/usb/host/whci/asl.c +++ b/drivers/usb/host/whci/asl.c @@ -115,6 +115,10 @@ static uint32_t process_qset(struct whc *whc, struct whc_qset *qset) if (status & QTD_STS_HALTED) { /* Ug, an error. */ process_halted_qtd(whc, qset, td); + /* A halted qTD always triggers an update + because the qset was either removed or + reactivated. 
*/ + update |= WHC_UPDATE_UPDATED; goto done; } @@ -305,6 +309,7 @@ int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status) struct whc_urb *wurb = urb->hcpriv; struct whc_qset *qset = wurb->qset; struct whc_std *std, *t; + bool has_qtd = false; int ret; unsigned long flags; @@ -315,17 +320,21 @@ int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status) goto out; list_for_each_entry_safe(std, t, &qset->stds, list_node) { - if (std->urb == urb) + if (std->urb == urb) { + if (std->qtd) + has_qtd = true; qset_free_std(whc, std); - else + } else std->qtd = NULL; /* so this std is re-added when the qset is */ } - asl_qset_remove(whc, qset); - wurb->status = status; - wurb->is_async = true; - queue_work(whc->workqueue, &wurb->dequeue_work); - + if (has_qtd) { + asl_qset_remove(whc, qset); + wurb->status = status; + wurb->is_async = true; + queue_work(whc->workqueue, &wurb->dequeue_work); + } else + qset_remove_urb(whc, qset, urb, status); out: spin_unlock_irqrestore(&whc->lock, flags); diff --git a/drivers/usb/host/whci/pzl.c b/drivers/usb/host/whci/pzl.c index a9e05bac664..0db3fb2dc03 100644 --- a/drivers/usb/host/whci/pzl.c +++ b/drivers/usb/host/whci/pzl.c @@ -121,6 +121,10 @@ static enum whc_update pzl_process_qset(struct whc *whc, struct whc_qset *qset) if (status & QTD_STS_HALTED) { /* Ug, an error. */ process_halted_qtd(whc, qset, td); + /* A halted qTD always triggers an update + because the qset was either removed or + reactivated. */ + update |= WHC_UPDATE_UPDATED; goto done; } @@ -333,6 +337,7 @@ int pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status) struct whc_urb *wurb = urb->hcpriv; struct whc_qset *qset = wurb->qset; struct whc_std *std, *t; + bool has_qtd = false; int ret; unsigned long flags; @@ -343,17 +348,22 @@ int pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status) goto out; list_for_each_entry_safe(std, t, &qset->stds, list_node) { - if (std->urb == urb) + if (std->urb == urb) { + if (std->qtd) + has_qtd = true; qset_free_std(whc, std); - else + } else std->qtd = NULL; /* so this std is re-added when the qset is */ } - pzl_qset_remove(whc, qset); - wurb->status = status; - wurb->is_async = false; - queue_work(whc->workqueue, &wurb->dequeue_work); - + if (has_qtd) { + pzl_qset_remove(whc, qset); + update_pzl_hw_view(whc); + wurb->status = status; + wurb->is_async = false; + queue_work(whc->workqueue, &wurb->dequeue_work); + } else + qset_remove_urb(whc, qset, urb, status); out: spin_unlock_irqrestore(&whc->lock, flags); diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 43c22702756..cd44c68954d 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -315,6 +315,9 @@ static int option_resume(struct usb_serial *serial); #define QISDA_PRODUCT_H20_4515 0x4515 #define QISDA_PRODUCT_H20_4519 0x4519 +/* TLAYTECH PRODUCTS */ +#define TLAYTECH_VENDOR_ID 0x20B9 +#define TLAYTECH_PRODUCT_TEU800 0x1682 /* TOSHIBA PRODUCTS */ #define TOSHIBA_VENDOR_ID 0x0930 @@ -328,6 +331,9 @@ static int option_resume(struct usb_serial *serial); #define ALCATEL_VENDOR_ID 0x1bbb #define ALCATEL_PRODUCT_X060S 0x0000 +/* Airplus products */ +#define AIRPLUS_VENDOR_ID 0x1011 +#define AIRPLUS_PRODUCT_MCD650 0x3198 static struct usb_device_id option_ids[] = { { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, @@ -589,6 +595,8 @@ static struct usb_device_id option_ids[] = { { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) }, { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, { 
USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) }, + { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, + { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index 45883988a00..5019325ba25 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c @@ -296,7 +296,6 @@ struct sierra_port_private { int dsr_state; int dcd_state; int ri_state; - unsigned int opened:1; }; @@ -306,6 +305,8 @@ static int sierra_send_setup(struct usb_serial_port *port) struct sierra_port_private *portdata; __u16 interface = 0; int val = 0; + int do_send = 0; + int retval; dev_dbg(&port->dev, "%s\n", __func__); @@ -324,10 +325,7 @@ static int sierra_send_setup(struct usb_serial_port *port) */ if (port->interrupt_in_urb) { /* send control message */ - return usb_control_msg(serial->dev, - usb_rcvctrlpipe(serial->dev, 0), - 0x22, 0x21, val, interface, - NULL, 0, USB_CTRL_SET_TIMEOUT); + do_send = 1; } } @@ -339,12 +337,18 @@ static int sierra_send_setup(struct usb_serial_port *port) interface = 1; else if (port->bulk_out_endpointAddress == 5) interface = 2; - return usb_control_msg(serial->dev, - usb_rcvctrlpipe(serial->dev, 0), - 0x22, 0x21, val, interface, - NULL, 0, USB_CTRL_SET_TIMEOUT); + + do_send = 1; } - return 0; + if (!do_send) + return 0; + + usb_autopm_get_interface(serial->interface); + retval = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), + 0x22, 0x21, val, interface, NULL, 0, USB_CTRL_SET_TIMEOUT); + usb_autopm_put_interface(serial->interface); + + return retval; } static void sierra_set_termios(struct tty_struct *tty, @@ -773,8 +777,11 @@ static void sierra_close(struct usb_serial_port *port) if (serial->dev) { mutex_lock(&serial->disc_mutex); - if (!serial->disconnected) + if (!serial->disconnected) { + serial->interface->needs_remote_wakeup = 0; + usb_autopm_get_interface(serial->interface); sierra_send_setup(port); + } mutex_unlock(&serial->disc_mutex); spin_lock_irq(&intfdata->susp_lock); portdata->opened = 0; @@ -788,8 +795,6 @@ static void sierra_close(struct usb_serial_port *port) sierra_release_urb(portdata->in_urbs[i]); portdata->in_urbs[i] = NULL; } - usb_autopm_get_interface(serial->interface); - serial->interface->needs_remote_wakeup = 0; } } @@ -827,6 +832,8 @@ static int sierra_open(struct tty_struct *tty, struct usb_serial_port *port) if (err) { /* get rid of everything as in close */ sierra_close(port); + /* restore balance for autopm */ + usb_autopm_put_interface(serial->interface); return err; } sierra_send_setup(port); @@ -915,7 +922,7 @@ static void sierra_release(struct usb_serial *serial) #ifdef CONFIG_PM static void stop_read_write_urbs(struct usb_serial *serial) { - int i, j; + int i; struct usb_serial_port *port; struct sierra_port_private *portdata; @@ -923,8 +930,7 @@ static void stop_read_write_urbs(struct usb_serial *serial) for (i = 0; i < serial->num_ports; ++i) { port = serial->port[i]; portdata = usb_get_serial_port_data(port); - for (j = 0; j < N_IN_URB; j++) - usb_kill_urb(portdata->in_urbs[j]); + sierra_stop_rx_urbs(port); usb_kill_anchored_urbs(&portdata->active); } } diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c index 3a4fb023af7..589f6b4404f 100644 --- a/drivers/usb/storage/transport.c +++ b/drivers/usb/storage/transport.c @@ -696,7 +696,7 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us) /* device supports and 
needs bigger sense buffer */ if (us->fflags & US_FL_SANE_SENSE) sense_size = ~0; - +Retry_Sense: US_DEBUGP("Issuing auto-REQUEST_SENSE\n"); scsi_eh_prep_cmnd(srb, &ses, NULL, 0, sense_size); @@ -720,6 +720,21 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us) srb->result = DID_ABORT << 16; goto Handle_Errors; } + + /* Some devices claim to support larger sense but fail when + * trying to request it. When a transport failure happens + * using US_FS_SANE_SENSE, we always retry with a standard + * (small) sense request. This fixes some USB GSM modems + */ + if (temp_result == USB_STOR_TRANSPORT_FAILED && + (us->fflags & US_FL_SANE_SENSE) && + sense_size != US_SENSE_SIZE) { + US_DEBUGP("-- auto-sense failure, retry small sense\n"); + sense_size = US_SENSE_SIZE; + goto Retry_Sense; + } + + /* Other failures */ if (temp_result != USB_STOR_TRANSPORT_GOOD) { US_DEBUGP("-- auto-sense failure\n"); diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c index b2f149fedcc..4516c36436e 100644 --- a/drivers/usb/wusbcore/security.c +++ b/drivers/usb/wusbcore/security.c @@ -200,35 +200,40 @@ int wusb_dev_sec_add(struct wusbhc *wusbhc, { int result, bytes, secd_size; struct device *dev = &usb_dev->dev; - struct usb_security_descriptor secd; + struct usb_security_descriptor *secd; const struct usb_encryption_descriptor *etd, *ccm1_etd = NULL; - void *secd_buf; const void *itr, *top; char buf[64]; + secd = kmalloc(sizeof(struct usb_security_descriptor), GFP_KERNEL); + if (secd == NULL) { + result = -ENOMEM; + goto out; + } + result = usb_get_descriptor(usb_dev, USB_DT_SECURITY, - 0, &secd, sizeof(secd)); + 0, secd, sizeof(struct usb_security_descriptor)); if (result < sizeof(secd)) { dev_err(dev, "Can't read security descriptor or " "not enough data: %d\n", result); - goto error_secd; + goto out; } - secd_size = le16_to_cpu(secd.wTotalLength); - secd_buf = kmalloc(secd_size, GFP_KERNEL); - if (secd_buf == NULL) { + secd_size = le16_to_cpu(secd->wTotalLength); + secd = krealloc(secd, secd_size, GFP_KERNEL); + if (secd == NULL) { dev_err(dev, "Can't allocate space for security descriptors\n"); - goto error_secd_alloc; + goto out; } result = usb_get_descriptor(usb_dev, USB_DT_SECURITY, - 0, secd_buf, secd_size); + 0, secd, secd_size); if (result < secd_size) { dev_err(dev, "Can't read security descriptor or " "not enough data: %d\n", result); - goto error_secd_all; + goto out; } bytes = 0; - itr = secd_buf + sizeof(secd); - top = secd_buf + result; + itr = &secd[1]; + top = (void *)secd + result; while (itr < top) { etd = itr; if (top - itr < sizeof(*etd)) { @@ -259,24 +264,16 @@ int wusb_dev_sec_add(struct wusbhc *wusbhc, dev_err(dev, "WUSB device doesn't support CCM1 encryption, " "can't use!\n"); result = -EINVAL; - goto error_no_ccm1; + goto out; } wusb_dev->ccm1_etd = *ccm1_etd; dev_dbg(dev, "supported encryption: %s; using %s (0x%02x/%02x)\n", buf, wusb_et_name(ccm1_etd->bEncryptionType), ccm1_etd->bEncryptionValue, ccm1_etd->bAuthKeyIndex); result = 0; - kfree(secd_buf); out: + kfree(secd); return result; - - -error_no_ccm1: -error_secd_all: - kfree(secd_buf); -error_secd_alloc: -error_secd: - goto out; } void wusb_dev_sec_rm(struct wusb_dev *wusb_dev) diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 200c22f5513..9dd58804288 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -19,7 +19,6 @@ */ //#define DEBUG #include <linux/virtio.h> -#include <linux/virtio_ids.h> #include 
<linux/virtio_balloon.h> #include <linux/swap.h> #include <linux/kthread.h> @@ -248,7 +247,7 @@ out: return err; } -static void virtballoon_remove(struct virtio_device *vdev) +static void __devexit virtballoon_remove(struct virtio_device *vdev) { struct virtio_balloon *vb = vdev->priv; diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c index 4a1f1ebff7b..28d9cf7cf72 100644 --- a/drivers/virtio/virtio_pci.c +++ b/drivers/virtio/virtio_pci.c @@ -530,19 +530,22 @@ static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs, err = PTR_ERR(vqs[i]); goto error_find; } + + if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR) + continue; + /* allocate per-vq irq if available and necessary */ - if (vp_dev->per_vq_vectors) { - snprintf(vp_dev->msix_names[msix_vec], - sizeof *vp_dev->msix_names, - "%s-%s", - dev_name(&vp_dev->vdev.dev), names[i]); - err = request_irq(msix_vec, vring_interrupt, 0, - vp_dev->msix_names[msix_vec], - vqs[i]); - if (err) { - vp_del_vq(vqs[i]); - goto error_find; - } + snprintf(vp_dev->msix_names[msix_vec], + sizeof *vp_dev->msix_names, + "%s-%s", + dev_name(&vp_dev->vdev.dev), names[i]); + err = request_irq(vp_dev->msix_entries[msix_vec].vector, + vring_interrupt, 0, + vp_dev->msix_names[msix_vec], + vqs[i]); + if (err) { + vp_del_vq(vqs[i]); + goto error_find; } } return 0; diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index f5360058072..fbd2ecde93e 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -285,6 +285,9 @@ static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len) return NULL; } + /* Only get used array entries after they have been exposed by host. */ + rmb(); + i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id; *len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len; |